diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 762e1683..00000000 --- a/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -.DS_Store -*.xpr - -# Packages -.venv -*.egg -*.egg-info - -# Build directories -target/ -publish-docs/ -build/ -/build-*.log.gz - -# Test environment -.tox/ - -# Transifex Client Setting -.tx - -# Editors -*~ -.*.swp -.bak diff --git a/.gitreview b/.gitreview deleted file mode 100644 index ebfed28e..00000000 --- a/.gitreview +++ /dev/null @@ -1,4 +0,0 @@ -[gerrit] -host=review.openstack.org -port=29418 -project=openstack/ha-guide.git diff --git a/README.rst b/README.rst index 1fc53151..cd0f8453 100644 --- a/README.rst +++ b/README.rst @@ -1,65 +1,13 @@ -OpenStack High Availability Guide -+++++++++++++++++++++++++++++++++ +This project is no longer maintained. -This repository contains the OpenStack High Availability Guide. +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". -For more details, see the `OpenStack Documentation wiki page -`_. +The content has been merged into the openstack-manuals repository at +http://git.openstack.org/cgit/openstack/openstack-manuals/ -Building -======== - -The root directory of the *OpenStack High Availability Guide* -is ``doc/ha-guide``. - -To build the guide, run ``tox -e docs``. - -Testing of changes and building of the manual -============================================= - -Install the python tox package and run ``tox`` from the top-level -directory to use the same tests that are done as part of our Jenkins -gating jobs. - -If you would like to run individual tests, run: - - * ``tox -e checkniceness`` - to run the niceness tests - * ``tox -e checkbuild`` - to actually build the manual - -tox will use the openstack-doc-tools package for execution of these -tests. - - -Contributing -============ - -Our community welcomes all people interested in open source cloud -computing, and encourages you to join the `OpenStack Foundation -`_. - -The best way to get involved with the community is to talk with others -online or at a meet up and offer contributions through our processes, -the `OpenStack wiki `_, blogs, or on IRC at -``#openstack`` on ``irc.freenode.net``. - -We welcome all types of contributions, from blueprint designs to -documentation to testing to deployment scripts. - -If you would like to contribute to the documents, please see the -`OpenStack Documentation Contributor Guide -`_. - - -Bugs -==== - -Bugs should be filed on Launchpad, not GitHub: - - https://bugs.launchpad.net/openstack-manuals - - -Installing -========== - -Refer to http://docs.openstack.org to see where these documents are published -and to learn more about the OpenStack project. +For any further questions, please email +openstack-docs@lists.openstack.org or join #openstack-doc on +Freenode. diff --git a/doc-test.conf b/doc-test.conf deleted file mode 100644 index c1851fa1..00000000 --- a/doc-test.conf +++ /dev/null @@ -1,2 +0,0 @@ -[DEFAULT] -repo_name = ha-guide diff --git a/doc-tools-check-languages.conf b/doc-tools-check-languages.conf deleted file mode 100644 index 30d30e04..00000000 --- a/doc-tools-check-languages.conf +++ /dev/null @@ -1,31 +0,0 @@ -# Configuration for translation setup.
- -# directories to be set up -declare -A DIRECTORIES=( -) - -# books to be built -declare -A BOOKS=( - ["ja"]="ha-guide" -) - -# draft books -declare -A DRAFTS=( - ["ja"]="ha-guide" -) - -# Where does the top-level pom live? -# Set to empty to not copy it. -POM_FILE="" - -# Location of doc dir -DOC_DIR="doc/" - -# Books with special handling -# Values need to match content in project-config/jenkins/scripts/common_translation_update.sh -declare -A SPECIAL_BOOKS -SPECIAL_BOOKS=( - ["ha-guide"]="RST" - # These are translated in openstack-manuals - ["common"]="skip" -) diff --git a/doc/common/README.txt b/doc/common/README.txt deleted file mode 100644 index f46538ad..00000000 --- a/doc/common/README.txt +++ /dev/null @@ -1,7 +0,0 @@ -Important note about this directory -=================================== - -Because this directory is synced from openstack-manuals, make any changes in -openstack-manuals/doc/common. After changes to the synced files merge to -openstack-manuals/doc/common, a patch is automatically proposed for this -directory. diff --git a/doc/common/app_support.rst b/doc/common/app_support.rst deleted file mode 100644 index 79ca3ad3..00000000 --- a/doc/common/app_support.rst +++ /dev/null @@ -1,256 +0,0 @@ -.. ## WARNING ########################################################## -.. This file is synced from openstack/openstack-manuals repository to -.. other related repositories. If you need to make changes to this file, -.. make the changes in openstack-manuals. After any change is merged to -.. openstack-manuals, a patch for the other repositories is proposed -.. automatically. -.. ##################################################################### - -================= -Community support -================= - -The following resources are available to help you run and use OpenStack. -The OpenStack community constantly improves and adds to the main -features of OpenStack, but if you have any questions, do not hesitate to -ask. Use the following resources to get OpenStack support and -troubleshoot your installations. - -Documentation -~~~~~~~~~~~~~ - -For the available OpenStack documentation, see -`docs.openstack.org `__. - -To provide feedback on documentation, join and use the -openstack-docs@lists.openstack.org mailing list at `OpenStack -Documentation Mailing -List `__, -or `report a -bug `__.
- -The following books explain how to install an OpenStack cloud and its -associated components: - -* `Installation Guide for openSUSE Leap 42.1 and SUSE Linux Enterprise - Server 12 SP1 - `__ - -* `Installation Guide for Red Hat Enterprise Linux 7 and CentOS 7 - `__ - -* `Installation Guide for Ubuntu 14.04 (LTS) - `__ - -The following books explain how to configure and run an OpenStack cloud: - -* `Architecture Design Guide `__ - -* `Administrator Guide `__ - -* `Configuration Reference `__ - -* `Operations Guide `__ - -* `Networking Guide `__ - -* `High Availability Guide `__ - -* `Security Guide `__ - -* `Virtual Machine Image Guide `__ - -The following books explain how to use the OpenStack dashboard and -command-line clients: - -* `API Guide `__ - -* `End User Guide `__ - -* `Command-Line Interface Reference - `__ - -The following documentation provides reference and guidance information -for the OpenStack APIs: - -* `API Complete Reference - (HTML) `__ - -* `API Complete Reference - (PDF) `__ - -The following guide explains how to contribute to OpenStack documentation: - -* `Documentation Contributor Guide `__ - -ask.openstack.org -~~~~~~~~~~~~~~~~~ - -During setup or testing of OpenStack, you might have questions -about how a specific task is completed or be in a situation where a -feature does not work correctly. Use the -`ask.openstack.org `__ site to ask questions -and get answers. When you visit the https://ask.openstack.org site, scan -the recently asked questions to see whether your question has already -been answered. If not, ask a new question. Be sure to give a clear, -concise summary in the title and provide as much detail as possible in -the description. Paste in your command output or stack traces, links to -screen shots, and any other information which might be useful. - -OpenStack mailing lists -~~~~~~~~~~~~~~~~~~~~~~~ - -A great way to get answers and insights is to post your question or -problematic scenario to the OpenStack mailing list. You can learn from -and help others who might have similar issues. To subscribe or view the -archives, go to -http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack. If you are -interested in the other mailing lists for specific projects or development, -refer to `Mailing Lists `__. - -The OpenStack wiki -~~~~~~~~~~~~~~~~~~ - -The `OpenStack wiki `__ contains a broad -range of topics but some of the information can be difficult to find or -is a few pages deep. Fortunately, the wiki search feature enables you to -search by title or content. If you search for specific information, such -as about networking or OpenStack Compute, you can find a large amount -of relevant material. More is being added all the time, so be sure to -check back often. You can find the search box in the upper-right corner -of any OpenStack wiki page. - -The Launchpad Bugs area -~~~~~~~~~~~~~~~~~~~~~~~ - -The OpenStack community values your setup and testing efforts and wants -your feedback. To log a bug, you must sign up for a Launchpad account at -https://launchpad.net/+login. You can view existing bugs and report bugs -in the Launchpad Bugs area. Use the search feature to determine whether -the bug has already been reported or already been fixed. If it still -seems like your bug is unreported, fill out a bug report. - -Some tips: - -* Give a clear, concise summary. - -* Provide as much detail as possible in the description. Paste in your - command output or stack traces, links to screen shots, and any other - information which might be useful.
- -* Be sure to include the software and package versions that you are - using, especially if you are using a development branch, such as, - ``"Kilo release" vs git commit bc79c3ecc55929bac585d04a03475b72e06a3208``. - -* Any deployment-specific information is helpful, such as whether you - are using Ubuntu 14.04 or are performing a multi-node installation. - -The following Launchpad Bugs areas are available: - -* `Bugs: OpenStack Block Storage - (cinder) `__ - -* `Bugs: OpenStack Compute (nova) `__ - -* `Bugs: OpenStack Dashboard - (horizon) `__ - -* `Bugs: OpenStack Identity - (keystone) `__ - -* `Bugs: OpenStack Image service - (glance) `__ - -* `Bugs: OpenStack Networking - (neutron) `__ - -* `Bugs: OpenStack Object Storage - (swift) `__ - -* `Bugs: Application catalog (murano) `__ - -* `Bugs: Bare metal service (ironic) `__ - -* `Bugs: Clustering service (senlin) `__ - -* `Bugs: Containers service (magnum) `__ - -* `Bugs: Data processing service - (sahara) `__ - -* `Bugs: Database service (trove) `__ - -* `Bugs: Deployment service (fuel) `__ - -* `Bugs: DNS service (designate) `__ - -* `Bugs: Key Manager Service (barbican) `__ - -* `Bugs: Monitoring (monasca) `__ - -* `Bugs: Orchestration (heat) `__ - -* `Bugs: Rating (cloudkitty) `__ - -* `Bugs: Shared file systems (manila) `__ - -* `Bugs: Telemetry - (ceilometer) `__ - -* `Bugs: Telemetry v3 - (gnocchi) `__ - -* `Bugs: Workflow service - (mistral) `__ - -* `Bugs: Messaging service - (zaqar) `__ - -* `Bugs: OpenStack API Documentation - (developer.openstack.org) `__ - -* `Bugs: OpenStack Documentation - (docs.openstack.org) `__ - -The OpenStack IRC channel -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The OpenStack community lives in the #openstack IRC channel on the -Freenode network. You can hang out, ask questions, or get immediate -feedback for urgent and pressing issues. To install an IRC client or use -a browser-based client, go to -`https://webchat.freenode.net/ `__. You can -also use Colloquy (Mac OS X, http://colloquy.info/), mIRC (Windows, -http://www.mirc.com/), or XChat (Linux). When you are in the IRC channel -and want to share code or command output, the generally accepted method -is to use a Paste Bin. The OpenStack project has one at -http://paste.openstack.org. Just paste your longer amounts of text or -logs in the web form and you get a URL that you can paste into the -channel. The OpenStack IRC channel is ``#openstack`` on -``irc.freenode.net``. You can find a list of all OpenStack IRC channels -at https://wiki.openstack.org/wiki/IRC. - -Documentation feedback -~~~~~~~~~~~~~~~~~~~~~~ - -To provide feedback on documentation, join and use the -openstack-docs@lists.openstack.org mailing list at `OpenStack -Documentation Mailing -List `__, -or `report a -bug `__. - -OpenStack distribution packages -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following Linux distributions provide community-supported packages -for OpenStack: - -* **Debian:** https://wiki.debian.org/OpenStack - -* **CentOS, Fedora, and Red Hat Enterprise Linux:** - https://www.rdoproject.org/ - -* **openSUSE and SUSE Linux Enterprise Server:** - https://en.opensuse.org/Portal:OpenStack - -* **Ubuntu:** https://wiki.ubuntu.com/ServerTeam/CloudArchive diff --git a/doc/common/conventions.rst b/doc/common/conventions.rst deleted file mode 100644 index b3cbabb2..00000000 --- a/doc/common/conventions.rst +++ /dev/null @@ -1,47 +0,0 @@ -.. ## WARNING ########################################################## -.. This file is synced from openstack/openstack-manuals repository to -.. 
other related repositories. If you need to make changes to this file, -.. make the changes in openstack-manuals. After any change is merged to -.. openstack-manuals, a patch for the other repositories is proposed -.. automatically. -.. ##################################################################### - -=========== -Conventions -=========== - -The OpenStack documentation uses several typesetting conventions. - -Notices -~~~~~~~ - -Notices take these forms: - -.. note:: A comment with additional information that explains a part of the - text. - -.. important:: Something you must be aware of before proceeding. - -.. tip:: An extra but helpful piece of practical advice. - -.. caution:: Helpful information that prevents the user from making mistakes. - -.. warning:: Critical information about the risk of data loss or security - issues. - -Command prompts -~~~~~~~~~~~~~~~ - -.. code-block:: console - - $ command - -Any user, including the ``root`` user, can run commands that are -prefixed with the ``$`` prompt. - -.. code-block:: console - - # command - -The ``root`` user must run commands that are prefixed with the ``#`` -prompt. You can also prefix these commands with the :command:`sudo` -command, if available, to run them. diff --git a/doc/common/glossary.rst b/doc/common/glossary.rst deleted file mode 100644 index fe9cbc41..00000000 --- a/doc/common/glossary.rst +++ /dev/null @@ -1,3950 +0,0 @@ -======== -Glossary -======== - -.. comments - This file is automatically generated; edit the master doc/glossary/glossary-terms.xml to update it. - -This glossary offers a list of terms and definitions to define a -vocabulary for OpenStack-related concepts. - -To add to the OpenStack glossary, clone the `openstack/openstack-manuals repository `__ and update the source file -``doc/glossary/glossary-terms.xml`` through the -OpenStack contribution process. - -.. glossary:: - - 6to4 - - A mechanism that allows IPv6 packets to be transmitted - over an IPv4 network, providing a strategy for migrating to - IPv6. - - absolute limit - - Impassable limits for guest VMs. Settings include total RAM - size, maximum number of vCPUs, and maximum disk size. - - access control list - - A list of permissions attached to an object. An ACL specifies - which users or system processes have access to objects. It also - defines which operations can be performed on specified objects. Each - entry in a typical ACL specifies a subject and an operation. For - instance, the ACL entry ``(Alice, delete)`` for a file gives - Alice permission to delete the file. - - access key - - Alternative term for an Amazon EC2 access key. See EC2 access - key. - - account - - The Object Storage context of an account. Do not confuse with a - user account from an authentication service, such as Active Directory, - /etc/passwd, OpenLDAP, OpenStack Identity, and so on. - - account auditor - - Checks for missing replicas and incorrect or corrupted objects - in a specified Object Storage account by running queries against the - back-end SQLite database. - - account database - - A SQLite database that contains Object Storage accounts and - related metadata and that the account server accesses. - - account reaper - - An Object Storage worker that scans for and deletes account - databases that the account server has marked for deletion. - - account server - - Lists containers in Object Storage and stores container - information in the account database.
- - account service - - An Object Storage component that provides account services such - as list, create, modify, and audit. Do not confuse with OpenStack - Identity service, OpenLDAP, or similar user-account services. - - accounting - - The Compute service provides accounting information through the - event notification and system usage data facilities. - - ACL - - See access control list. - - active/active configuration - - In a high-availability setup with an active/active - configuration, several systems share the load together and if one - fails, the load is distributed to the remaining systems. - - Active Directory - - Authentication and identity service by Microsoft, based on LDAP. - Supported in OpenStack. - - active/passive configuration - - In a high-availability setup with an active/passive - configuration, systems are set up to bring additional resources online - to replace those that have failed. - - address pool - - A group of fixed and/or floating IP addresses that are assigned - to a project and can be used by or assigned to the VM instances in a - project. - - admin API - - A subset of API calls that are accessible to authorized - administrators and are generally not accessible to end users or the - public Internet. They can exist as a separate service (keystone) or - can be a subset of another API (nova). - - administrator - - The person responsible for installing, configuring, - and managing an OpenStack cloud. - - admin server - - In the context of the Identity service, the worker process that - provides access to the admin API. - - Advanced Message Queuing Protocol (AMQP) - - The open standard messaging protocol used by OpenStack - components for intra-service communications, provided by RabbitMQ, - Qpid, or ZeroMQ. - - Advanced RISC Machine (ARM) - - Lower power consumption CPU often found in mobile and embedded - devices. Supported by OpenStack. - - alert - - The Compute service can send alerts through its notification - system, which includes a facility to create custom notification - drivers. Alerts can be sent to and displayed on the horizon - dashboard. - - allocate - - The process of taking a floating IP address from the address - pool so it can be associated with a fixed IP on a guest VM - instance. - - Amazon Kernel Image (AKI) - - Both a VM container format and disk format. Supported by Image - service. - - Amazon Machine Image (AMI) - - Both a VM container format and disk format. Supported by Image - service. - - Amazon Ramdisk Image (ARI) - - Both a VM container format and disk format. Supported by Image - service. - - Anvil - - A project that ports the shell script-based project named - DevStack to Python. - - Apache - - The Apache Software Foundation supports the Apache community of - open-source software projects. These projects provide software - products for the public good. - - Apache License 2.0 - - All OpenStack core projects are provided under the terms of the - Apache License 2.0 license. - - Apache Web Server - - The most common web server software currently used on the - Internet. - - API endpoint - - The daemon, worker, or service that a client communicates with - to access an API. API endpoints can provide any number of services, - such as authentication, sales data, performance meters, Compute VM - commands, census data, and so on. - - API extension - - Custom modules that extend some OpenStack core APIs. - - API extension plug-in - - Alternative term for a Networking plug-in or Networking API - extension. 
- - API key - - Alternative term for an API token. - - API server - - Any node running a daemon or worker that provides an API - endpoint. - - API token - - Passed to API requests and used by OpenStack to verify that the - client is authorized to run the requested operation. - - API version - - In OpenStack, the API version for a project is part of the URL. - For example, ``example.com/nova/v1/foobar``. - - applet - - A Java program that can be embedded into a web page. - - Application Programming Interface (API) - - A collection of specifications used to access a service, - application, or program. Includes service calls, required parameters - for each call, and the expected return values. - - Application Catalog service - - OpenStack project that provides an application catalog - service so that users can compose and deploy composite - environments on an application abstraction level while - managing the application lifecycle. The code name of the - project is murano. - - application server - - A piece of software that makes available another piece of - software over a network. - - Application Service Provider (ASP) - - Companies that rent specialized applications that help - businesses and organizations provide additional services - at lower cost. - - Address Resolution Protocol (ARP) - - The protocol by which layer-3 IP addresses are resolved into - layer-2 link local addresses. - - arptables - - Tool used for maintaining Address Resolution Protocol packet - filter rules in the Linux kernel firewall modules. Used along with - iptables, ebtables, and ip6tables in Compute to provide firewall - services for VMs. - - associate - - The process of associating a Compute floating IP address with a - fixed IP address. - - Asynchronous JavaScript and XML (AJAX) - - A group of interrelated web development techniques used on the - client-side to create asynchronous web applications. Used extensively - in horizon. - - ATA over Ethernet (AoE) - - A disk storage protocol tunneled within Ethernet. - - attach - - The process of connecting a VIF or vNIC to an L2 network in - Networking. In the context of Compute, this process connects a storage - volume to an instance. - - attachment (network) - - Association of an interface ID to a logical port. Plugs an - interface into a port. - - auditing - - Provided in Compute through the system usage data - facility. - - auditor - - A worker process that verifies the integrity of Object Storage - objects, containers, and accounts. Auditors is the collective term for - the Object Storage account auditor, container auditor, and object - auditor. - - Austin - - The code name for the initial release of - OpenStack. The first design summit took place in - Austin, Texas, US. - - auth node - - Alternative term for an Object Storage authorization - node. - - authentication - - The process that confirms that the user, process, or client is - really who they say they are through private key, secret token, - password, fingerprint, or similar method. - - authentication token - - A string of text provided to the client after authentication. - Must be provided by the user or process in subsequent requests to the - API endpoint. - - AuthN - - The Identity service component that provides authentication - services. - - authorization - - The act of verifying that a user, process, or client is - authorized to perform an action. - - authorization node - - An Object Storage node that provides authorization - services.
- - AuthZ - - The Identity component that provides high-level - authorization services. - - Auto ACK - - Configuration setting within RabbitMQ that enables or disables - message acknowledgment. Enabled by default. - - auto declare - - A Compute RabbitMQ setting that determines whether a message - exchange is automatically created when the program starts. - - availability zone - - An Amazon EC2 concept of an isolated area that is used for fault - tolerance. Do not confuse with an OpenStack Compute zone or - cell. - - AWS - - Amazon Web Services. - - AWS CloudFormation template - - AWS CloudFormation allows AWS users to create and manage a - collection of related resources. The Orchestration service - supports a CloudFormation-compatible format (CFN). - - back end - - Interactions and processes that are obfuscated from the user, - such as Compute volume mount, data transmission to an iSCSI target by - a daemon, or Object Storage object integrity checks. - - back-end catalog - - The storage method used by the Identity service catalog service - to store and retrieve information about API endpoints that are - available to the client. Examples include an SQL database, LDAP - database, or KVS back end. - - back-end store - - The persistent data store used to save and retrieve information - for a service, such as lists of Object Storage objects, current state - of guest VMs, lists of user names, and so on. Also, the method that the - Image service uses to get and store VM images. Options include Object - Storage, local file system, S3, and HTTP. - - backup restore and disaster recovery as a service - - The OpenStack project that provides integrated tooling for - backing up, restoring, and recovering file systems, - instances, or database backups. The project name is freezer. - - bandwidth - - The amount of available data used by communication resources, - such as the Internet. Represents the amount of data that is used to - download things or the amount of data available to download. - - barbican - - Code name of the key management service for OpenStack. - - bare - - An Image service container format that indicates that no - container exists for the VM image. - - Bare Metal service - - OpenStack project that provisions bare metal, as opposed to - virtual, machines. The code name for the project is ironic. - - base image - - An OpenStack-provided image. - - Bell-LaPadula model - - A security model that focuses on data confidentiality - and controlled access to classified information. - This model divides the entities into subjects and objects. - The clearance of a subject is compared to the classification of the - object to determine if the subject is authorized for the specific access mode. - The clearance or classification scheme is expressed in terms of a lattice. - - Benchmark service - - OpenStack project that provides a framework for - performance analysis and benchmarking of individual - OpenStack components as well as full production OpenStack - cloud deployments. The code name of the project is rally. - - Bexar - - A grouped release of projects related to - OpenStack that came out in February of 2011. It - included only Compute (nova) and Object Storage (swift). - Bexar is the code name for the second release of - OpenStack. The design summit took place in - San Antonio, Texas, US, which is the county seat for Bexar county. - - binary - - Information that consists solely of ones and zeroes, which is - the language of computers.
- - bit - - A bit is a single digit number that is in base of 2 (either a - zero or one). Bandwidth usage is measured in bits per second. - - bits per second (BPS) - - The universal measurement of how quickly data is transferred - from place to place. - - block device - - A device that moves data in the form of blocks. These device - nodes interface the devices, such as hard disks, CD-ROM drives, flash - drives, and other addressable regions of memory. - - block migration - - A method of VM live migration used by KVM to evacuate instances - from one host to another with very little downtime during a - user-initiated switchover. Does not require shared storage. Supported - by Compute. - - Block Storage service - - The OpenStack core project that enables management of volumes, - volume snapshots, and volume types. The project name of Block Storage - is cinder. - - Block Storage API - - An API on a separate endpoint for attaching, - detaching, and creating block storage for compute - VMs. - - BMC - - Baseboard Management Controller. The intelligence in the IPMI - architecture, which is a specialized micro-controller that is embedded - on the motherboard of a computer and acts as a server. Manages the - interface between system management software and platform - hardware. - - bootable disk image - - A type of VM image that exists as a single, bootable - file. - - Bootstrap Protocol (BOOTP) - - A network protocol used by a network client to obtain an IP - address from a configuration server. Provided in Compute through the - dnsmasq daemon when using either the FlatDHCP manager or VLAN manager - network manager. - - Border Gateway Protocol (BGP) - - The Border Gateway Protocol is a dynamic routing protocol - that connects autonomous systems. Considered the - backbone of the Internet, this protocol connects disparate - networks to form a larger network. - - browser - - Any client software that enables a computer or device to access - the Internet. - - builder file - - Contains configuration information that Object Storage uses to - reconfigure a ring or to re-create it from scratch after a serious - failure. - - bursting - - The practice of utilizing a secondary environment to - elastically build instances on-demand when the primary - environment is resource constrained. - - button class - - A group of related button types within horizon. Buttons to - start, stop, and suspend VMs are in one class. Buttons to associate - and disassociate floating IP addresses are in another class, and so - on. - - byte - - Set of bits that make up a single character; there are usually 8 - bits to a byte. - - CA - - Certificate Authority or Certification Authority. In - cryptography, an entity that issues digital certificates. The digital - certificate certifies the ownership of a public key by the named - subject of the certificate. This enables others (relying parties) to - rely upon signatures or assertions made by the private key that - corresponds to the certified public key. In this model of trust - relationships, a CA is a trusted third party for both the subject - (owner) of the certificate and the party relying upon the certificate. - CAs are characteristic of many public key infrastructure (PKI) - schemes. - - cache pruner - - A program that keeps the Image service VM image cache at or - below its configured maximum size. - - Cactus - - An OpenStack grouped release of projects that came out in the - spring of 2011. It included Compute (nova), Object Storage (swift), - and the Image service (glance). 
- Cactus is a city in Texas, US and is the code name for - the third release of OpenStack. When OpenStack releases went - from three to six months long, the code name of the release - changed to match a geography nearest the previous - summit. - - CADF - - Cloud Auditing Data Federation (CADF) is a - specification for audit event data. CADF is - supported by OpenStack Identity. - - CALL - - One of the RPC primitives used by the OpenStack message queue - software. Sends a message and waits for a response. - - capability - - Defines resources for a cell, including CPU, storage, and - networking. Can apply to the specific services within a cell or a - whole cell. - - capacity cache - - A Compute back-end database table that contains the current - workload, amount of free RAM, and number of VMs running on each host. - Used to determine on which host a VM starts. - - capacity updater - - A notification driver that monitors VM instances and updates the - capacity cache as needed. - - CAST - - One of the RPC primitives used by the OpenStack message queue - software. Sends a message and does not wait for a response. - - catalog - - A list of API endpoints that are available to a user after - authentication with the Identity service. - - catalog service - - An Identity service that lists API endpoints that are available - to a user after authentication with the Identity service. - - ceilometer - - The project name for the Telemetry service, which is an - integrated project that provides metering and measuring facilities for - OpenStack. - - cell - - Provides logical partitioning of Compute resources in a child - and parent relationship. Requests are passed from parent cells to - child cells if the parent cannot provide the requested - resource. - - cell forwarding - - A Compute option that enables parent cells to pass resource - requests to child cells if the parent cannot provide the requested - resource. - - cell manager - - The Compute component that contains a list of the current - capabilities of each host within the cell and routes requests as - appropriate. - - CentOS - - A Linux distribution that is compatible with OpenStack. - - Ceph - - Massively scalable distributed storage system that consists of - an object store, block store, and POSIX-compatible distributed file - system. Compatible with OpenStack. - - CephFS - - The POSIX-compliant file system provided by Ceph. - - certificate authority - - A simple certificate authority provided by Compute for cloudpipe - VPNs and VM image decryption. - - Challenge-Handshake Authentication Protocol (CHAP) - - An iSCSI authentication method supported by Compute. - - chance scheduler - - A scheduling method used by Compute that randomly chooses an - available host from the pool. - - changes since - - A Compute API parameter that downloads changes to the requested - item since your last request, instead of downloading a new, fresh set - of data and comparing it against the old data. - - Chef - - An operating system configuration management tool supporting - OpenStack deployments. - - child cell - - If a requested resource such as CPU time, disk storage, or - memory is not available in the parent cell, the request is forwarded - to its associated child cells. If the child cell can fulfill the - request, it does. Otherwise, it attempts to pass the request to any of - its children. - - cinder - - A core OpenStack project that provides block storage services - for VMs. 
- - CirrOS - - A minimal Linux distribution designed for use as a test - image on clouds such as OpenStack. - - Cisco neutron plug-in - - A Networking plug-in for Cisco devices and technologies, - including UCS and Nexus. - - cloud architect - - A person who plans, designs, and oversees the creation of - clouds. - - cloud computing - - A model that enables access to a shared pool of configurable - computing resources, such as networks, servers, storage, applications, - and services, that can be rapidly provisioned and released with - minimal management effort or service provider interaction. - - cloud controller - - Collection of Compute components that represent the global state - of the cloud; talks to services, such as Identity authentication, - Object Storage, and node/storage workers through a - queue. - - cloud controller node - - A node that runs network, volume, API, scheduler, and image - services. Each service may be broken out into separate nodes for - scalability or availability. - - Cloud Data Management Interface (CDMI) - - SNIA standard that defines a RESTful API for managing objects in - the cloud, currently unsupported in OpenStack. - - Cloud Infrastructure Management Interface (CIMI) - - An in-progress specification for cloud management. Currently - unsupported in OpenStack. - - cloud-init - - A package commonly installed in VM images that performs - initialization of an instance after boot using information that it - retrieves from the metadata service, such as the SSH public key and - user data. - - cloudadmin - - One of the default roles in the Compute RBAC system. Grants - complete system access. - - Cloudbase-Init - - A Windows project providing guest initialization features, - similar to cloud-init. - - cloudpipe - - A compute service that creates VPNs on a per-project - basis. - - cloudpipe image - - A pre-made VM image that serves as a cloudpipe server. - Essentially, OpenVPN running on Linux. - - Clustering service - - The OpenStack project that implements - clustering services and libraries for the management of - groups of homogeneous objects exposed by other OpenStack - services. The project name of Clustering service is - senlin. - - CMDB - - Configuration Management Database. - - congress - - OpenStack project that provides the Governance service. - - command filter - - Lists allowed commands within the Compute rootwrap - facility. - - Common Internet File System (CIFS) - - A file sharing protocol. It is a public or open variation of the - original Server Message Block (SMB) protocol developed and used by - Microsoft. Like the SMB protocol, CIFS runs at a higher level and uses - the TCP/IP protocol. - - community project - - A project that is not officially endorsed by the OpenStack - Foundation. If the project is successful enough, it might be elevated - to an incubated project and then to a core project, or it might be - merged with the main code trunk. - - compression - - Reducing the size of files by special encoding; the file can be - decompressed again to its original content. OpenStack supports - compression at the Linux file system level but does not support - compression for things such as Object Storage objects or Image service - VM images. - - Compute service - - The OpenStack core project that provides compute services. The - project name of Compute service is nova. - - Compute API - - The nova-api daemon - provides access to nova services. Can communicate with other APIs, - such as the Amazon EC2 API.
- - compute controller - - The Compute component that chooses suitable hosts on which to - start VM instances. - - compute host - - Physical host dedicated to running compute nodes. - - compute node - - A node that runs the nova-compute daemon that manages VM - instances that provide a wide - range of services, such as web applications and analytics. - - Compute service - - Name for the Compute component that manages VMs. - - compute worker - - The Compute component that runs on each compute node and manages - the VM instance lifecycle, including run, reboot, terminate, - attach/detach volumes, and so on. Provided by the nova-compute daemon. - - concatenated object - - A set of segment objects that Object Storage combines and sends - to the client. - - conductor - - In Compute, conductor is the process that proxies database - requests from the compute process. Using conductor improves security - because compute nodes do not need direct access to the - database. - - consistency window - - The amount of time it takes for a new Object Storage object to - become accessible to all clients. - - console log - - Contains the output from a Linux VM console in Compute. - - container - - Organizes and stores objects in Object Storage. Similar to the - concept of a Linux directory but cannot be nested. Alternative term - for an Image service container format. - - container auditor - - Checks for missing replicas or incorrect objects in specified - Object Storage containers through queries to the SQLite back-end - database. - - container database - - A SQLite database that stores Object Storage containers and - container metadata. The container server accesses this - database. - - container format - - A wrapper used by the Image service that contains a VM image and - its associated metadata, such as machine state, OS disk size, and so - on. - - container server - - An Object Storage server that manages containers. - - Containers service - - OpenStack project that provides a set of services for - management of application containers in a multi-tenant cloud - environment. The code name of the project is magnum. - - container service - - The Object Storage component that provides container services, - such as create, delete, list, and so on. - - content delivery network (CDN) - - A content delivery network is a specialized network that is - used to distribute content to clients, typically located - close to the client for increased performance. - - controller node - - Alternative term for a cloud controller node. - - core API - - Depending on context, the core API is either the OpenStack API - or the main API of a specific core project, such as Compute, - Networking, Image service, and so on. - - core service - - An official OpenStack service defined as core by the - DefCore Committee. Currently, it consists of - Block Storage service (cinder), Compute service (nova), - Identity service (keystone), Image service (glance), - Networking service (neutron), and Object Storage service (swift). - - cost - - Under the Compute distributed scheduler, this is calculated by - looking at the capabilities of each host relative to the flavor of the - VM instance being requested. - - credentials - - Data that is only known to or accessible by a user and - used to verify that the user is who he says he is. - Credentials are presented to the server during - authentication. Examples include a password, secret key, - digital certificate, and fingerprint.
- - Cross-Origin Resource Sharing (CORS) - - A mechanism that allows many resources (for example, - fonts, JavaScript) on a web page to be requested from - another domain outside the domain from which the resource - originated. In particular, JavaScript's AJAX calls can use - the XMLHttpRequest mechanism. - - Crowbar - - An open source community project by Dell that aims to provide - all necessary services to quickly deploy clouds. - - current workload - - An element of the Compute capacity cache that is calculated - based on the number of build, snapshot, migrate, and resize operations - currently in progress on a given host. - - customer - - Alternative term for tenant. - - customization module - - A user-created Python module that is loaded by horizon to change - the look and feel of the dashboard. - - daemon - - A process that runs in the background and waits for requests. - May or may not listen on a TCP or UDP port. Do not confuse with a - worker. - - DAC - - Discretionary access control. Governs the ability of subjects to - access objects, while enabling users to make policy decisions and - assign security attributes. The traditional UNIX system of users, - groups, and read-write-execute permissions is an example of - DAC. - - Dashboard - - The web-based management interface for OpenStack. An alternative - name for horizon. - - data encryption - - Both Image service and Compute support encrypted virtual machine - (VM) images (but not instances). In-transit data encryption is - supported in OpenStack using technologies such as HTTPS, SSL, TLS, and - SSH. Object Storage does not support object encryption at the - application level but may support storage that uses disk encryption. - - database ID - - A unique ID given to each replica of an Object Storage - database. - - database replicator - - An Object Storage component that copies changes in the account, - container, and object databases to other nodes. - - Database service - - An integrated project that provides scalable and reliable - Cloud Database-as-a-Service functionality for both - relational and non-relational database engines. The project - name of Database service is trove. - - Data Processing service - - OpenStack project that provides a scalable - data-processing stack and associated management - interfaces. The code name for the project is sahara. - - data store - - A database engine supported by the Database service. - - deallocate - - The process of removing the association between a floating IP - address and a fixed IP address. Once this association is removed, the - floating IP returns to the address pool. - - Debian - - A Linux distribution that is compatible with OpenStack. - - deduplication - - The process of finding duplicate data at the disk block, file, - and/or object level to minimize storage use—currently unsupported - within OpenStack. - - default panel - - The default panel that is displayed when a user accesses the - horizon dashboard. - - default tenant - - New users are assigned to this tenant if no tenant is specified - when a user is created. - - default token - - An Identity service token that is not associated with a specific - tenant and is exchanged for a scoped token. - - delayed delete - - An option within Image service so that an image is deleted after - a predefined number of seconds instead of immediately. - - delivery mode - - Setting for the Compute RabbitMQ message delivery mode; can be - set to either transient or persistent.
- - denial of service (DoS) - - Denial of service (DoS) is a short form for - denial-of-service attack. This is a malicious attempt to - prevent legitimate users from using a service. - - deprecated auth - - An option within Compute that enables administrators to create - and manage users through the ``nova-manage`` command as - opposed to using the Identity service. - - designate - - Code name for the DNS service project for OpenStack. - - Desktop-as-a-Service - - A platform that provides a suite of desktop environments - that users access to receive a desktop experience from - any location. This may provide general use, development, or - even homogeneous testing environments. - - developer - - One of the default roles in the Compute RBAC system and the - default role assigned to a new user. - - device ID - - Maps Object Storage partitions to physical storage - devices. - - device weight - - Distributes partitions proportionately across Object Storage - devices based on the storage capacity of each device. - - DevStack - - Community project that uses shell scripts to quickly build - complete OpenStack development environments. - - DHCP - - Dynamic Host Configuration Protocol. A network protocol that - configures devices that are connected to a network so that they can - communicate on that network by using the Internet Protocol (IP). The - protocol is implemented in a client-server model where DHCP clients - request configuration data, such as an IP address, a default route, - and one or more DNS server addresses from a DHCP server. - - DHCP agent - - OpenStack Networking agent that provides DHCP services - for virtual networks. - - Diablo - - A grouped release of projects related to OpenStack that came out - in the fall of 2011, the fourth release of OpenStack. It included - Compute (nova 2011.3), Object Storage (swift 1.4.3), and the Image - service (glance). - Diablo is the code name for the fourth release of - OpenStack. The design summit took place in - the Bay Area near Santa Clara, - California, US and Diablo is a nearby city. - - direct consumer - - An element of the Compute RabbitMQ that comes to life when a RPC - call is executed. It connects to a direct exchange through a unique - exclusive queue, sends the message, and terminates. - - direct exchange - - A routing table that is created within the Compute RabbitMQ - during RPC calls; one is created for each RPC call that is - invoked. - - direct publisher - - Element of RabbitMQ that provides a response to an incoming MQ - message. - - disassociate - - The process of removing the association between a floating IP - address and fixed IP and thus returning the floating IP address to the - address pool. - - disk encryption - - The ability to encrypt data at the file system, disk partition, - or whole-disk level. Supported within Compute VMs. - - disk format - - The underlying format that a disk image for a VM is stored as - within the Image service back-end store. For example, AMI, ISO, QCOW2, - VMDK, and so on. - - dispersion - - In Object Storage, tools to test and ensure dispersion of - objects and containers to ensure fault tolerance. - - distributed virtual router (DVR) - - Mechanism for highly-available multi-host routing when using - OpenStack Networking (neutron). - - Django - - A web framework used extensively in horizon. - - DNS - - Domain Name System. A hierarchical and distributed naming system - for computers, services, and resources connected to the Internet or a - private network. 
Associates human-friendly names with IP - addresses. - - DNS record - - A record that specifies information about a particular domain - and belongs to the domain. - - DNS service - - OpenStack project that provides scalable, on demand, self - service access to authoritative DNS services, in a - technology-agnostic manner. The code name for the project is - designate. - - dnsmasq - - Daemon that provides DNS, DHCP, BOOTP, and TFTP services for - virtual networks. - - domain - - An Identity API v3 entity. Represents a collection of - projects, groups and users that defines administrative boundaries for - managing OpenStack Identity entities. - On the Internet, separates a website from other sites. Often, - the domain name has two or more parts that are separated by dots. - For example, yahoo.com, usa.gov, harvard.edu, or - mail.yahoo.com. - Also, a domain is an entity or container of all DNS-related - information containing one or more records. - - Domain Name System (DNS) - - A system by which Internet domain name-to-address and - address-to-name resolutions are determined. - DNS helps navigate the Internet by translating domain names, - which are easier to remember, into IP addresses. For example, - translating www.yahoo.com into 111.111.111.1. - All domains and their components, such as mail servers, utilize - DNS to resolve to the appropriate locations. DNS servers are usually - set up in a master-slave relationship such that failure of the master - invokes the slave. DNS servers might also be clustered or replicated - such that changes made to one DNS server are automatically propagated - to other active servers. - In Compute, the support that enables associating DNS entries - with floating IP addresses, nodes, or cells so that hostnames are - consistent across reboots. - - download - - The transfer of data, usually in the form of files, from one - computer to another. - - DRTM - - Dynamic root of trust measurement. - - durable exchange - - The Compute RabbitMQ message exchange that remains active when - the server restarts. - - durable queue - - A Compute RabbitMQ message queue that remains active when the - server restarts. - - Dynamic Host Configuration Protocol (DHCP) - - A method to automatically configure networking for a host at - boot time. Provided by both Networking and Compute. - - Dynamic HyperText Markup Language (DHTML) - - Pages that use HTML, JavaScript, and Cascading Style Sheets to - enable users to interact with a web page or show simple - animation. - - east-west traffic - - Network traffic between servers in the same cloud or data center. - See also north-south traffic. - - EBS boot volume - - An Amazon EBS storage volume that contains a bootable VM image, - currently unsupported in OpenStack. - - ebtables - - Filtering tool for a Linux bridging firewall, enabling - filtering of network traffic passing through a Linux bridge. - Used in Compute along with arptables, iptables, and ip6tables - to ensure isolation of network communications. - - EC2 - - The Amazon commercial compute product, similar to - Compute. - - EC2 access key - - Used along with an EC2 secret key to access the Compute EC2 - API. - - EC2 API - - OpenStack supports accessing the Amazon EC2 API through - Compute. - - EC2 Compatibility API - - A Compute component that enables OpenStack to communicate with - Amazon EC2. - - EC2 secret key - - Used along with an EC2 access key when communicating with the - Compute EC2 API; used to digitally sign each request.
- - Elastic Block Storage (EBS) - - The Amazon commercial block storage product. - - encryption - - OpenStack supports encryption technologies such as HTTPS, SSH, - SSL, TLS, digital certificates, and data encryption. - - endpoint - - See API endpoint. - - endpoint registry - - Alternative term for an Identity service catalog. - - encapsulation - - The practice of placing one packet type within another for - the purposes of abstracting or securing data. Examples - include GRE, MPLS, or IPsec. - - endpoint template - - A list of URL and port number endpoints that indicate where a - service, such as Object Storage, Compute, Identity, and so on, can be - accessed. - - entity - - Any piece of hardware or software that wants to connect to the - network services provided by Networking, the network connectivity - service. An entity can make use of Networking by implementing a - VIF. - - ephemeral image - - A VM image that does not save changes made to its volumes and - reverts them to their original state after the instance is - terminated. - - ephemeral volume - - Volume that does not save the changes made to it and reverts to - its original state when the current user relinquishes control. - - Essex - - A grouped release of projects related to OpenStack that came out - in April 2012, the fifth release of OpenStack. It included Compute - (nova 2012.1), Object Storage (swift 1.4.8), Image (glance), Identity - (keystone), and Dashboard (horizon). - Essex is the code name for the fifth release of - OpenStack. The design summit took place in - Boston, Massachusetts, US and Essex is a nearby city. - - ESXi - - An OpenStack-supported hypervisor. - - ETag - - MD5 hash of an object within Object Storage, used to ensure data - integrity. - - euca2ools - - A collection of command-line tools for administering VMs; most - are compatible with OpenStack. - - Eucalyptus Kernel Image (EKI) - - Used along with an ERI to create an EMI. - - Eucalyptus Machine Image (EMI) - - VM image container format supported by Image service. - - Eucalyptus Ramdisk Image (ERI) - - Used along with an EKI to create an EMI. - - evacuate - - The process of migrating one or all virtual machine (VM) - instances from one host to another, compatible with both shared - storage live migration and block migration. - - exchange - - Alternative term for a RabbitMQ message exchange. - - exchange type - - A routing algorithm in the Compute RabbitMQ. - - exclusive queue - - Connected to by a direct consumer in RabbitMQ—Compute, the - message can be consumed only by the current connection. - - extended attributes (xattr) - - File system option that enables storage of additional - information beyond owner, group, permissions, modification time, and - so on. The underlying Object Storage file system must support extended - attributes. - - extension - - Alternative term for an API extension or plug-in. In the context - of Identity service, this is a call that is specific to the - implementation, such as adding support for OpenID. - - external network - - A network segment typically used for instance Internet - access. - - extra specs - - Specifies additional requirements when Compute determines where - to start a new instance. Examples include a minimum amount of network - bandwidth or a GPU. - - FakeLDAP - - An easy method to create a local LDAP directory for testing - Identity and Compute. Requires Redis. 
- - fan-out exchange - - Within RabbitMQ and Compute, it is the messaging interface that - is used by the scheduler service to receive capability messages from - the compute, volume, and network nodes. - - federated identity - - A method to establish trusts between identity providers and the - OpenStack cloud. - - Fedora - - A Linux distribution compatible with OpenStack. - - Fibre Channel - - Storage protocol similar in concept to TCP/IP; encapsulates SCSI - commands and data. - - Fibre Channel over Ethernet (FCoE) - - The fibre channel protocol tunneled within Ethernet. - - fill-first scheduler - - The Compute scheduling method that attempts to fill a host with - VMs rather than starting new VMs on a variety of hosts. - - filter - - The step in the Compute scheduling process when hosts that - cannot run VMs are eliminated and not chosen. - - firewall - - Used to restrict communications between hosts and/or nodes, - implemented in Compute using iptables, arptables, ip6tables, and - ebtables. - - FWaaS - - A Networking extension that provides perimeter firewall - functionality. - - fixed IP address - - An IP address that is associated with the same instance each - time that instance boots, is generally not accessible to end users or - the public Internet, and is used for management of the - instance. - - Flat Manager - - The Compute component that gives IP addresses to authorized - nodes and assumes DHCP, DNS, and routing configuration and services - are provided by something else. - - flat mode injection - - A Compute networking method where the OS network configuration - information is injected into the VM image before the instance - starts. - - flat network - - Virtual network type that uses neither VLANs nor tunnels to - segregate tenant traffic. Each flat network typically requires - a separate underlying physical interface defined by bridge - mappings. However, a flat network can contain multiple - subnets. - - FlatDHCP Manager - - The Compute component that provides dnsmasq (DHCP, DNS, BOOTP, - TFTP) and radvd (routing) services. - - flavor - - Alternative term for a VM instance type. - - flavor ID - - UUID for each Compute or Image service VM flavor or instance - type. - - floating IP address - - An IP address that a project can associate with a VM so that the - instance has the same public IP address each time that it boots. You - create a pool of floating IP addresses and assign them to instances as - they are launched to maintain a consistent IP address for maintaining - DNS assignment. - - Folsom - - A grouped release of projects related to OpenStack that came out - in the fall of 2012, the sixth release of OpenStack. It includes - Compute (nova), Object Storage (swift), Identity (keystone), - Networking (neutron), Image service (glance), and Volumes or Block - Storage (cinder). - Folsom is the code name for the sixth release of - OpenStack. The design summit took place in - San Francisco, California, US and Folsom is a nearby city. - - FormPost - - Object Storage middleware that uploads (posts) an image through - a form on a web page. - - freezer - - OpenStack project that provides backup restore and disaster - recovery as a service. - - front end - - The point where a user interacts with a service; can be an API - endpoint, the horizon dashboard, or a command-line tool. - - gateway - - An IP address, typically assigned to a router, that - passes network traffic between different networks. 
- - generic receive offload (GRO) - - Feature of certain network interface drivers that combines many smaller received packets into a large packet before delivery to the kernel IP stack. - - generic routing encapsulation (GRE) - - Protocol that encapsulates a wide variety of network layer protocols inside virtual point-to-point links. - - glance - - A core project that provides the OpenStack Image service. - - glance API server - - Processes client requests for VMs, updates Image service metadata on the registry server, and communicates with the store adapter to upload VM images from the back-end store. - - glance registry - - Alternative term for the Image service image registry. - - global endpoint template - - The Identity service endpoint template that contains services available to all tenants. - - GlusterFS - - A file system designed to aggregate NAS hosts, compatible with OpenStack. - - golden image - - A method of operating system installation where a finalized disk image is created and then used by all nodes without modification. - - Governance service - - OpenStack project to provide Governance-as-a-Service across any collection of cloud services in order to monitor, enforce, and audit policy over dynamic infrastructure. The code name for the project is congress. - - Graphics Interchange Format (GIF) - - A type of image file that is commonly used for animated images on web pages. - - Graphics Processing Unit (GPU) - - Choosing a host based on the existence of a GPU is currently unsupported in OpenStack. - - Green Threads - - The cooperative threading model used by Python; reduces race conditions and only context switches when specific library calls are made. Each OpenStack service is its own thread. - - Grizzly - - The code name for the seventh release of OpenStack. The design summit took place in San Diego, California, US and Grizzly is an element of the state flag of California. - - Group - - An Identity v3 API entity. Represents a collection of users that is owned by a specific domain. - - guest OS - - An operating system instance running under the control of a hypervisor. - - Hadoop - - Apache Hadoop is an open source software framework that supports data-intensive distributed applications. - - Hadoop Distributed File System (HDFS) - - A distributed, highly fault-tolerant file system designed to run on low-cost commodity hardware. - - handover - - An object state in Object Storage where a new replica of the object is automatically created due to a drive failure. - - hard reboot - - A type of reboot where a physical or virtual power button is pressed as opposed to a graceful, proper shutdown of the operating system. - - Havana - - The code name for the eighth release of OpenStack. The design summit took place in Portland, Oregon, US and Havana is an unincorporated community in Oregon. - - health monitor - - Determines whether back-end members of a VIP pool can process a request. A pool can have several health monitors associated with it. When a pool has several monitors associated with it, all monitors check each member of the pool. All monitors must declare a member to be healthy for it to stay active (see the sketch below). - - heat - - An integrated project that aims to orchestrate multiple cloud applications for OpenStack. - - Heat Orchestration Template (HOT) - - Heat input in the format native to OpenStack.
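The health monitor entry above specifies AND semantics: every monitor associated with a pool must report a member healthy for it to stay active. A small sketch of that logic with stand-in check functions:

.. code-block:: python

   def member_is_active(member, monitors):
       """A member stays active only if *all* monitors declare it healthy."""
       return all(monitor(member) for monitor in monitors)

   # Illustrative monitors; real LBaaS monitors use ping, TCP, or HTTP GET.
   monitors = [
       lambda m: m['tcp_ok'],
       lambda m: m['http_status'] == 200,
   ]
   member = {'tcp_ok': True, 'http_status': 200}
   print(member_is_active(member, monitors))  # True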
- - high availability (HA) - - A high availability system design approach and associated service implementation ensures that a prearranged level of operational performance will be met during a contractual measurement period. High availability systems seek to minimize system downtime and data loss (see the sketch below). - - horizon - - OpenStack project that provides a dashboard, which is a web interface. - - horizon plug-in - - A plug-in for the OpenStack dashboard (horizon). - - host - - A physical computer, not a VM instance (node). - - host aggregate - - A method to further subdivide availability zones into hypervisor pools, a collection of common hosts. - - Host Bus Adapter (HBA) - - Device plugged into a PCI slot, such as a fibre channel or network card. - - hybrid cloud - - A hybrid cloud is a composition of two or more clouds (private, community or public) that remain distinct entities but are bound together, offering the benefits of multiple deployment models. Hybrid cloud can also mean the ability to connect colocation, managed and/or dedicated services with cloud resources. - - Hyper-V - - One of the hypervisors supported by OpenStack. - - hyperlink - - Any kind of text that contains a link to some other site, commonly found in documents where clicking on a word or words opens up a different website. - - Hypertext Transfer Protocol (HTTP) - - An application protocol for distributed, collaborative, hypermedia information systems. It is the foundation of data communication for the World Wide Web. Hypertext is structured text that uses logical links (hyperlinks) between nodes containing text. HTTP is the protocol to exchange or transfer hypertext. - - Hypertext Transfer Protocol Secure (HTTPS) - - An encrypted communications protocol for secure communication over a computer network, with especially wide deployment on the Internet. Technically, it is not a protocol in and of itself; rather, it is the result of simply layering the Hypertext Transfer Protocol (HTTP) on top of the TLS or SSL protocol, thus adding the security capabilities of TLS or SSL to standard HTTP communications. Most OpenStack API endpoints and many inter-component communications support HTTPS communication. - - hypervisor - - Software that arbitrates and controls VM access to the actual underlying hardware. - - hypervisor pool - - A collection of hypervisors grouped together through host aggregates. - - IaaS - - Infrastructure-as-a-Service. IaaS is a provisioning model in which an organization outsources physical components of a data center, such as storage, hardware, servers, and networking components. A service provider owns the equipment and is responsible for housing, operating and maintaining it. The client typically pays on a per-use basis. IaaS is a model for providing cloud services. - - Icehouse - - The code name for the ninth release of OpenStack. The design summit took place in Hong Kong and Ice House is a street in that city. - - ICMP - - Internet Control Message Protocol, used by network devices for control messages. For example, :command:`ping` uses ICMP to test connectivity. - - ID number - - Unique numeric ID associated with each user in Identity, conceptually similar to a Linux or LDAP UID. - - Identity API - - Alternative term for the Identity service API. - - Identity back end - - The source used by Identity service to retrieve user information; an OpenLDAP server, for example.
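The high availability entry above ties a service level to a measurement period; the arithmetic for turning an availability target into a downtime budget is worth making concrete:

.. code-block:: python

   def downtime_budget(availability, period_hours=365 * 24):
       """Hours of allowed downtime for a given availability target."""
       return (1.0 - availability) * period_hours

   for target in (0.99, 0.999, 0.9999):
       print(f"{target:.2%} availability -> "
             f"{downtime_budget(target):.2f} hours of downtime per year")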
- - identity provider - - A directory service, which allows users to log in with a user name and password. It is a typical source of authentication tokens. - - Identity service - - The OpenStack core project that provides a central directory of users mapped to the OpenStack services they can access. It also registers endpoints for OpenStack services. It acts as a common authentication system. The project name of Identity is keystone. - - Identity service API - - The API used to access the OpenStack Identity service provided through keystone. - - IDS - - Intrusion Detection System. - - image - - A collection of files for a specific operating system (OS) that you use to create or rebuild a server. OpenStack provides pre-built images. You can also create custom images, or snapshots, from servers that you have launched. Custom images can be used for data backups or as "gold" images for additional servers. - - Image API - - The Image service API endpoint for management of VM images. - - image cache - - Used by Image service to obtain images on the local host rather than re-downloading them from the image server each time one is requested. - - image ID - - Combination of a URI and UUID used to access Image service VM images through the image API. - - image membership - - A list of tenants that can access a given VM image within Image service. - - image owner - - The tenant who owns an Image service virtual machine image. - - image registry - - A list of VM images that are available through Image service. - - Image service - - An OpenStack core project that provides discovery, registration, and delivery services for disk and server images. The project name of the Image service is glance. - - Image service API - - Alternative name for the glance image API. - - image status - - The current status of a VM image in Image service, not to be confused with the status of a running instance. - - image store - - The back-end store used by Image service to store VM images; options include Object Storage, local file system, S3, or HTTP. - - image UUID - - UUID used by Image service to uniquely identify each VM image. - - incubated project - - A community project may be elevated to this status and is then promoted to a core project. - - ingress filtering - - The process of filtering incoming network traffic. Supported by Compute. - - INI - - The OpenStack configuration files use an INI format to describe options and their values. It consists of sections and key-value pairs (see the sketch below). - - injection - - The process of putting a file into a virtual machine image before the instance is started. - - instance - - A running VM, or a VM in a known state such as suspended, that can be used like a hardware server. - - instance ID - - Alternative term for instance UUID. - - instance state - - The current state of a guest VM image. - - instance tunnels network - - A network segment used for instance traffic tunnels between compute nodes and the network node. - - instance type - - Describes the parameters of the various virtual machine images that are available to users; includes parameters such as CPU, storage, and memory. Alternative term for flavor. - - instance type ID - - Alternative term for a flavor ID. - - instance UUID - - Unique ID assigned to each guest VM instance. - - interface - - A physical or virtual device that provides connectivity to another device or medium. - - interface ID - - Unique ID for a Networking VIF or vNIC in the form of a UUID.
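The INI entry above describes sections of key-value pairs; Python's standard library parses the format directly (the section and option names shown are illustrative):

.. code-block:: python

   import configparser

   sample = """
   [DEFAULT]
   debug = false

   [database]
   connection = sqlite:///test.db
   """

   cfg = configparser.ConfigParser()
   cfg.read_string(sample)
   print(cfg.get('database', 'connection'))   # sqlite:///test.db
   print(cfg.getboolean('DEFAULT', 'debug'))  # False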
- - Internet protocol (IP) - - Principal communications protocol in the internet protocol suite for relaying datagrams across network boundaries. - - Internet Service Provider (ISP) - - Any business that provides Internet access to individuals or businesses. - - Internet Small Computer System Interface (iSCSI) - - Storage protocol that encapsulates SCSI frames for transport over IP networks. - - IOPS - - IOPS (Input/Output Operations Per Second) are a common performance measurement used to benchmark computer storage devices like hard disk drives, solid state drives, and storage area networks. - - IP address - - Number that is unique to every computer system on the Internet. Two versions of the Internet Protocol (IP) are in use for addresses: IPv4 and IPv6. - - IP Address Management (IPAM) - - The process of automating IP address allocation, deallocation, and management. Currently provided by Compute, melange, and Networking. - - IPL - - Initial Program Loader. - - IPMI - - Intelligent Platform Management Interface. IPMI is a standardized computer system interface used by system administrators for out-of-band management of computer systems and monitoring of their operation. In layman's terms, it is a way to manage a computer using a direct network connection, whether it is turned on or not; connecting to the hardware rather than an operating system or login shell. - - ip6tables - - Tool used to set up, maintain, and inspect the tables of IPv6 packet filter rules in the Linux kernel. In OpenStack Compute, ip6tables is used along with arptables, ebtables, and iptables to create firewalls for both nodes and VMs. - - ipset - - Extension to iptables that allows creation of firewall rules that match entire "sets" of IP addresses simultaneously. These sets reside in indexed data structures to increase efficiency, particularly on systems with a large quantity of rules. - - iptables - - Used along with arptables and ebtables, iptables creates firewalls in Compute. iptables are the tables provided by the Linux kernel firewall (implemented as different Netfilter modules) and the chains and rules it stores. Different kernel modules and programs are currently used for different protocols: iptables applies to IPv4, ip6tables to IPv6, arptables to ARP, and ebtables to Ethernet frames. Requires root privilege to manipulate. - - IQN - - iSCSI Qualified Name (IQN) is the format most commonly used for iSCSI names, which uniquely identify nodes in an iSCSI network. All IQNs follow the pattern iqn.yyyy-mm.domain:identifier, where 'yyyy-mm' is the year and month in which the domain was registered, 'domain' is the reversed domain name of the issuing organization, and 'identifier' is an optional string which makes each IQN under the same domain unique. For example, 'iqn.2015-10.org.openstack.408ae959bce1' (see the sketch below). - - ironic - - OpenStack project that provisions bare metal, as opposed to virtual, machines. - - iSCSI - - The SCSI disk protocol tunneled within Ethernet, supported by Compute, Object Storage, and Image service. - - ISO9660 - - One of the VM image disk formats supported by Image service. - - itsec - - A default role in the Compute RBAC system that can quarantine an instance in any project. - - Java - - A programming language that is used to create systems that involve more than one computer by way of a network. - - JavaScript - - A scripting language that is used to build web pages.
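The IQN entry above spells out the ``iqn.yyyy-mm.domain:identifier`` pattern; a simplified validation sketch (deliberately looser than the full RFC 3720 grammar):

.. code-block:: python

   import re

   IQN_RE = re.compile(
       r'^iqn\.\d{4}-\d{2}'   # 'iqn.' plus registration year and month
       r'\.[a-z0-9.-]+'       # reversed domain name of the issuing org
       r'(:[^\s]+)?$'         # optional unique identifier
   )

   print(bool(IQN_RE.match('iqn.2015-10.org.openstack:408ae959bce1')))  # True
   print(bool(IQN_RE.match('not-an-iqn')))                              # False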
- - JavaScript Object Notation (JSON) - - One of the supported response formats in OpenStack. - - Jenkins - - Tool used to run jobs automatically for OpenStack development. - - jumbo frame - - Feature in modern Ethernet networks that supports frames up to approximately 9000 bytes. - - Juno - - The code name for the tenth release of OpenStack. The design summit took place in Atlanta, Georgia, US and Juno is an unincorporated community in Georgia. - - Kerberos - - A network authentication protocol which works on the basis of tickets. Kerberos allows nodes to communicate over a non-secure network and to prove their identity to one another in a secure manner. - - kernel-based VM (KVM) - - An OpenStack-supported hypervisor. KVM is a full virtualization solution for Linux on x86 hardware containing virtualization extensions (Intel VT or AMD-V), ARM, IBM Power, and IBM zSeries. It consists of a loadable kernel module that provides the core virtualization infrastructure and a processor-specific module. - - Key Manager service - - OpenStack project that produces a secret storage and generation system capable of providing key management for services wishing to enable encryption features. The code name of the project is barbican. - - keystone - - The project that provides OpenStack Identity services. - - Kickstart - - A tool to automate system configuration and installation on Red Hat, Fedora, and CentOS-based Linux distributions. - - Kilo - - The code name for the eleventh release of OpenStack. The design summit took place in Paris, France. Due to delays in the name selection, the release was known only as K. Because ``k`` is the unit symbol for kilo and the reference artifact is stored near Paris in the Pavillon de Breteuil in Sèvres, the community chose Kilo as the release name. - - large object - - An object within Object Storage that is larger than 5 GB (see the sketch below). - - Launchpad - - The collaboration site for OpenStack. - - Layer-2 network - - Term used in the OSI network architecture for the data link layer. The data link layer is responsible for media access control, flow control and detecting and possibly correcting errors that may occur in the physical layer. - - Layer-3 network - - Term used in the OSI network architecture for the network layer. The network layer is responsible for packet forwarding including routing from one node to another. - - Layer-2 (L2) agent - - OpenStack Networking agent that provides layer-2 connectivity for virtual networks. - - Layer-3 (L3) agent - - OpenStack Networking agent that provides layer-3 (routing) services for virtual networks. - - Liberty - - The code name for the twelfth release of OpenStack. The design summit took place in Vancouver, Canada and Liberty is the name of a village in the Canadian province of Saskatchewan. - - libvirt - - Virtualization API library used by OpenStack to interact with many of its supported hypervisors. - - Lightweight Directory Access Protocol (LDAP) - - An application protocol for accessing and maintaining distributed directory information services over an IP network. - - Linux bridge - - Software that enables multiple VMs to share a single physical NIC within Compute. - - Linux Bridge neutron plug-in - - Enables a Linux bridge to understand a Networking port, interface attachment, and other abstractions. - - Linux containers (LXC) - - An OpenStack-supported hypervisor.
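The large object entry above sets the 5 GB threshold beyond which a client must upload segments (tracked by the manifest described in the next group of entries). A sketch of the slicing step, with sizes scaled down for readability:

.. code-block:: python

   def segment(data, max_bytes):
       """Split a payload into chunks no larger than max_bytes, as a
       client must do before uploading a large object."""
       return [data[i:i + max_bytes] for i in range(0, len(data), max_bytes)]

   payload = b'x' * 23
   segments = segment(payload, max_bytes=10)
   print([len(s) for s in segments])  # [10, 10, 3]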
- - live migration - - The ability within Compute to move running virtual machine instances from one host to another with only a small service interruption during switchover. - - load balancer - - A load balancer is a logical device that belongs to a cloud account. It is used to distribute workloads between multiple back-end systems or services, based on the criteria defined as part of its configuration. - - load balancing - - The process of spreading client requests between two or more nodes to improve performance and availability. - - Load-Balancer-as-a-Service (LBaaS) - - Enables Networking to distribute incoming requests evenly between designated instances. - - Logical Volume Manager (LVM) - - Provides a method of allocating space on mass-storage devices that is more flexible than conventional partitioning schemes. - - magnum - - Code name for the OpenStack project that provides the Containers Service. - - management API - - Alternative term for an admin API. - - management network - - A network segment used for administration, not accessible to the public Internet. - - manager - - Logical groupings of related code, such as the Block Storage volume manager or network manager. - - manifest - - Used to track segments of a large object within Object Storage. - - manifest object - - A special Object Storage object that contains the manifest for a large object. - - manila - - OpenStack project that provides shared file systems as a service to applications. - - maximum transmission unit (MTU) - - Maximum frame or packet size for a particular network medium. Typically 1500 bytes for Ethernet networks. - - mechanism driver - - A driver for the Modular Layer 2 (ML2) neutron plug-in that provides layer-2 connectivity for virtual instances. A single OpenStack installation can use multiple mechanism drivers. - - melange - - Project name for OpenStack Network Information Service. To be merged with Networking. - - membership - - The association between an Image service VM image and a tenant. Enables images to be shared with specified tenants. - - membership list - - A list of tenants that can access a given VM image within Image service. - - memcached - - A distributed memory object caching system that is used by Object Storage for caching. - - memory overcommit - - The ability to start new VM instances based on the actual memory usage of a host, as opposed to basing the decision on the amount of RAM each running instance thinks it has available. Also known as RAM overcommit. - - message broker - - The software package used to provide AMQP messaging capabilities within Compute. Default package is RabbitMQ. - - message bus - - The main virtual communication line used by all AMQP messages for inter-cloud communications within Compute. - - message queue - - Passes requests from clients to the appropriate workers and returns the output to the client after the job completes (see the sketch below). - - Message service - - OpenStack project that aims to produce an OpenStack messaging service that affords a variety of distributed application patterns in an efficient, scalable and highly-available manner, and to create and maintain associated Python libraries and documentation. The code name for the project is zaqar. - - Metadata agent - - OpenStack Networking agent that provides metadata services for instances. - - Meta-Data Server (MDS) - - Stores CephFS metadata. - - migration - - The process of moving a VM instance from one host to another.
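The message queue entry above describes passing requests from clients to workers and returning output when the job completes. A standard-library sketch of that pattern; RabbitMQ plays this role in a real deployment:

.. code-block:: python

   import queue
   import threading

   tasks = queue.Queue()
   results = queue.Queue()

   def worker():
       while True:
           job = tasks.get()
           if job is None:        # sentinel: shut the worker down
               break
           results.put(job * 2)   # stand-in for real work
           tasks.task_done()

   t = threading.Thread(target=worker)
   t.start()
   for job in (1, 2, 3):
       tasks.put(job)
   tasks.join()                   # wait for all jobs to complete
   tasks.put(None)
   t.join()
   print([results.get() for _ in range(3)])  # [2, 4, 6]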
- - mistral - - OpenStack project that provides the Workflow service. - - Mitaka - - The code name for the thirteenth release of OpenStack. The design summit took place in Tokyo, Japan. Mitaka is a city in Tokyo. - - Modular Layer 2 (ML2) neutron plug-in - - Can concurrently use multiple layer-2 networking technologies, such as 802.1Q and VXLAN, in Networking. - - monasca - - OpenStack project that provides a Monitoring service. - - Monitor (LBaaS) - - LBaaS feature that provides availability monitoring using the ``ping`` command, TCP, and HTTP/HTTPS GET. - - Monitor (Mon) - - A Ceph component that communicates with external clients, checks data state and consistency, and performs quorum functions. - - Monitoring - - The OpenStack project that provides a multi-tenant, highly scalable, performant, fault-tolerant Monitoring-as-a-Service solution for metrics, complex event processing, and logging. It builds an extensible platform for advanced monitoring services that can be used by both operators and tenants to gain operational insight and visibility, ensuring availability and stability. The project name is monasca. - - multi-factor authentication - - Authentication method that uses two or more credentials, such as a password and a private key. Currently not supported in Identity. - - multi-host - - High-availability mode for legacy (nova) networking. Each compute node handles NAT and DHCP and acts as a gateway for all of the VMs on it. A networking failure on one compute node doesn't affect VMs on other compute nodes. - - multinic - - Facility in Compute that allows each virtual machine instance to have more than one VIF connected to it. - - murano - - OpenStack project that provides an Application catalog. - - NAT - - Network Address Translation; Process of modifying IP address information while in transit. Supported by Compute and Networking. - - Nebula - - Released as open source by NASA in 2010 and is the basis for Compute. - - netadmin - - One of the default roles in the Compute RBAC system. Enables the user to allocate publicly accessible IP addresses to instances and change firewall rules. - - NetApp volume driver - - Enables Compute to communicate with NetApp storage devices through the NetApp OnCommand Provisioning Manager. - - network - - A virtual network that provides connectivity between entities. For example, a collection of virtual ports that share network connectivity. In Networking terminology, a network is always a layer-2 network. - - network controller - - A Compute daemon that orchestrates the network configuration of nodes, including IP addresses, VLANs, and bridging. Also manages routing for both public and private networks. - - Network File System (NFS) - - A method for making file systems available over the network. Supported by OpenStack. - - network ID - - Unique ID assigned to each network segment within Networking. Same as network UUID. - - network manager - - The Compute component that manages various network components, such as firewall rules, IP address allocation, and so on. - - network namespace - - Linux kernel feature that provides independent virtual networking instances on a single host with separate routing tables and interfaces. Similar to virtual routing and forwarding (VRF) services on physical network equipment.
- - network node - - Any compute node that runs the network worker daemon. - - network segment - - Represents a virtual, isolated OSI layer-2 subnet in Networking. - - network UUID - - Unique ID for a Networking network segment. - - network worker - - The ``nova-network`` worker daemon; provides services such as giving an IP address to a booting nova instance. - - Networking API - - API used to access OpenStack Networking. Provides an extensible architecture to enable custom plug-in creation. - - Networking service - - A core OpenStack project that provides a network connectivity abstraction layer to OpenStack Compute. The project name of Networking is neutron. - - neutron - - A core OpenStack project that provides a network connectivity abstraction layer to OpenStack Compute. - - neutron API - - An alternative name for Networking API. - - neutron manager - - Enables Compute and Networking integration, which enables Networking to perform network management for guest VMs. - - neutron plug-in - - Interface within Networking that enables organizations to create custom plug-ins for advanced features, such as QoS, ACLs, or IDS. - - Newton - - The code name for the fourteenth release of OpenStack. The design summit took place in Austin, Texas, US. The release is named after "Newton House", which is located at 1013 E. Ninth St., Austin, TX, and is listed on the National Register of Historic Places. - - Nexenta volume driver - - Provides support for NexentaStor devices in Compute. - - No ACK - - Disables server-side message acknowledgment in the Compute RabbitMQ. Increases performance but decreases reliability. - - node - - A VM instance that runs on a host. - - non-durable exchange - - Message exchange that is cleared when the service restarts. Its data is not written to persistent storage. - - non-durable queue - - Message queue that is cleared when the service restarts. Its data is not written to persistent storage. - - non-persistent volume - - Alternative term for an ephemeral volume. - - north-south traffic - - Network traffic between a user or client (north) and a server (south), or traffic into the cloud (south) and out of the cloud (north). See also east-west traffic. - - nova - - OpenStack project that provides compute services. - - Nova API - - Alternative term for the Compute API. - - nova-network - - A Compute component that manages IP address allocation, firewalls, and other network-related tasks. This is the legacy networking option and an alternative to Networking. - - NTP - - Network Time Protocol; Method of keeping a clock for a host or node correct via communication with a trusted, accurate time source. - - object - - A BLOB of data held by Object Storage; can be in any format. - - object auditor - - Opens all objects for an object server and verifies the MD5 hash, size, and metadata for each object. - - object expiration - - A configurable option within Object Storage to automatically delete objects after a specified amount of time has passed or a certain date is reached (see the sketch below). - - object hash - - Unique ID for an Object Storage object. - - object path hash - - Used by Object Storage to determine the location of an object in the ring. Maps objects to partitions. - - object replicator - - An Object Storage component that copies an object to remote partitions for fault tolerance. - - object server - - An Object Storage component that is responsible for managing objects.
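The object expiration entry above describes deleting objects once a timestamp passes. The ``X-Delete-At`` header holding that epoch timestamp is real Object Storage metadata; the in-memory store below is only a stand-in:

.. code-block:: python

   import time

   store = {
       'logs/2016-01-01.gz': {'x-delete-at': 1451692800, 'data': b'...'},
       'current.txt': {'x-delete-at': None, 'data': b'...'},
   }

   def expire(store, now=None):
       """Drop objects whose X-Delete-At timestamp has passed."""
       now = time.time() if now is None else now
       expired = [name for name, meta in store.items()
                  if meta['x-delete-at'] and meta['x-delete-at'] <= now]
       for name in expired:
           del store[name]

   expire(store)
   print(sorted(store))  # ['current.txt']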
- - Object Storage service - - The OpenStack core project that provides eventually consistent and redundant storage and retrieval of fixed digital content. The project name of OpenStack Object Storage is swift. - - Object Storage API - - API used to access OpenStack Object Storage. - - Object Storage Device (OSD) - - The Ceph storage daemon. - - object versioning - - Allows a user to set a flag on an Object Storage container so that all objects within the container are versioned. - - Ocata - - The code name for the fifteenth release of OpenStack. The design summit will take place in Barcelona, Spain. Ocata is a beach north of Barcelona. - - Oldie - - Term for an Object Storage process that runs for a long time. Can indicate a hung process. - - Open Cloud Computing Interface (OCCI) - - A standardized interface for managing compute, data, and network resources, currently unsupported in OpenStack. - - Open Virtualization Format (OVF) - - Standard for packaging VM images. Supported in OpenStack. - - Open vSwitch - - Open vSwitch is a production quality, multilayer virtual switch licensed under the open source Apache 2.0 license. It is designed to enable massive network automation through programmatic extension, while still supporting standard management interfaces and protocols (for example NetFlow, sFlow, SPAN, RSPAN, CLI, LACP, 802.1ag). - - Open vSwitch (OVS) agent - - Provides an interface to the underlying Open vSwitch service for the Networking plug-in. - - Open vSwitch neutron plug-in - - Provides support for Open vSwitch in Networking. - - OpenLDAP - - An open source LDAP server. Supported by both Compute and Identity. - - OpenStack - - OpenStack is a cloud operating system that controls large pools of compute, storage, and networking resources throughout a data center, all managed through a dashboard that gives administrators control while empowering their users to provision resources through a web interface. OpenStack is an open source project licensed under the Apache License 2.0. - - OpenStack code name - - Each OpenStack release has a code name. Code names ascend in alphabetical order: Austin, Bexar, Cactus, Diablo, Essex, Folsom, Grizzly, Havana, Icehouse, Juno, Kilo, Liberty, and Mitaka. Code names are cities or counties near where the corresponding OpenStack design summit took place. An exception, called the Waldon exception, is granted to elements of the state flag that sound especially cool. Code names are chosen by popular vote. - - openSUSE - - A Linux distribution that is compatible with OpenStack. - - operator - - The person responsible for planning and maintaining an OpenStack installation. - - optional service - - An official OpenStack service defined as optional by the DefCore Committee. Currently, it consists of Dashboard (horizon), Telemetry service (ceilometer), Orchestration service (heat), Database service (trove), Bare Metal service (ironic), and so on. - - Orchestration service - - An integrated project that orchestrates multiple cloud applications for OpenStack. The project name of Orchestration is heat. - - orphan - - In the context of Object Storage, this is a process that is not terminated after an upgrade, restart, or reload of the service. - - Oslo - - OpenStack project that produces a set of Python libraries containing code shared by OpenStack projects.
- - parent cell - - If a requested resource, such as CPU time, disk storage, or memory, is not available in the parent cell, the request is forwarded to associated child cells. - - partition - - A unit of storage within Object Storage used to store objects. It exists on top of devices and is replicated for fault tolerance. - - partition index - - Contains the locations of all Object Storage partitions within the ring. - - partition shift value - - Used by Object Storage to determine which partition data should reside on (see the sketch below). - - path MTU discovery (PMTUD) - - Mechanism in IP networks to detect end-to-end MTU and adjust packet size accordingly. - - pause - - A VM state where no changes occur (no changes in memory, network communications stop, etc.); the VM is frozen but not shut down. - - PCI passthrough - - Gives guest VMs exclusive access to a PCI device. Currently supported in OpenStack Havana and later releases. - - persistent message - - A message that is stored both in memory and on disk. The message is not lost after a failure or restart. - - persistent volume - - Changes to these types of disk volumes are saved. - - personality file - - A file used to customize a Compute instance. It can be used to inject SSH keys or a specific network configuration. - - Platform-as-a-Service (PaaS) - - Provides to the consumer the ability to deploy applications through a programming language or tools supported by the cloud platform provider. An example of Platform-as-a-Service is an Eclipse/Java programming platform provided with no downloads required. - - plug-in - - Software component providing the actual implementation for Networking APIs, or for Compute APIs, depending on the context. - - policy service - - Component of Identity that provides a rule-management interface and a rule-based authorization engine. - - pool - - A logical set of devices, such as web servers, that you group together to receive and process traffic. The load balancing function chooses which member of the pool handles the new requests or connections received on the VIP address. Each VIP has one pool. - - pool member - - An application that runs on the back-end server in a load-balancing system. - - port - - A virtual network port within Networking; VIFs / vNICs are connected to a port. - - port UUID - - Unique ID for a Networking port. - - preseed - - A tool to automate system configuration and installation on Debian-based Linux distributions. - - private image - - An Image service VM image that is only available to specified tenants. - - private IP address - - An IP address used for management and administration, not available to the public Internet. - - private network - - The Network Controller provides virtual networks to enable compute servers to interact with each other and with the public network. All machines must have a public and private network interface. A private network interface can be a flat or VLAN network interface. A flat network interface is controlled by the ``flat_interface`` option with flat managers. A VLAN network interface is controlled by the ``vlan_interface`` option with VLAN managers. - - project - - Projects represent the base unit of "ownership" in OpenStack, in that all resources in OpenStack should be owned by a specific project. In OpenStack Identity, a project must be owned by a specific domain. - - project ID - - User-defined alphanumeric string in Compute; the name of a project.
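The partition and partition shift value entries above describe how Object Storage maps an object onto the ring. The sketch below follows the commonly documented scheme of hashing the object path and shifting the digest down to a partition number; treat the constants as illustrative rather than a reimplementation of swift:

.. code-block:: python

   import hashlib
   import struct

   PART_POWER = 10              # 2**10 = 1024 partitions in this sketch
   PART_SHIFT = 32 - PART_POWER

   def partition_for(path):
       """Hash an object path and shift it down to a partition number."""
       digest = hashlib.md5(path.encode()).digest()
       return struct.unpack_from('>I', digest)[0] >> PART_SHIFT

   print(partition_for('/account/container/object'))  # a value in 0..1023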
- - project VPN - - Alternative term for a cloudpipe. - - promiscuous mode - - Causes the network interface to pass all traffic it - receives to the host rather than passing only the frames - addressed to it. - - protected property - - Generally, extra properties on an Image service image to - which only cloud administrators have access. Limits which user - roles can perform CRUD operations on that property. The cloud - administrator can configure any image property as - protected. - - provider - - An administrator who has access to all hosts and - instances. - - proxy node - - A node that provides the Object Storage proxy service. - - proxy server - - Users of Object Storage interact with the service through the - proxy server, which in turn looks up the location of the requested - data within the ring and returns the results to the user. - - public API - - An API endpoint used for both service-to-service communication - and end-user interactions. - - public image - - An Image service VM image that is available to all - tenants. - - public IP address - - An IP address that is accessible to end-users. - - public key authentication - - Authentication method that uses keys rather than - passwords. - - public network - - The Network Controller provides virtual networks to enable - compute servers to interact with each other and with the public - network. All machines must have a public and private network - interface. The public network interface is controlled by the - ``public_interface`` option. - - Puppet - - An operating system configuration-management tool supported by - OpenStack. - - Python - - Programming language used extensively in OpenStack. - - QEMU Copy On Write 2 (QCOW2) - - One of the VM image disk formats supported by Image - service. - - Qpid - - Message queue software supported by OpenStack; an alternative to - RabbitMQ. - - quarantine - - If Object Storage finds objects, containers, or accounts that - are corrupt, they are placed in this state, are not replicated, cannot - be read by clients, and a correct copy is re-replicated. - - Quick EMUlator (QEMU) - - QEMU is a generic and open source machine emulator and - virtualizer. - One of the hypervisors supported by OpenStack, generally used - for development purposes. - - quota - - In Compute and Block Storage, the ability to set resource limits - on a per-project basis. - - RabbitMQ - - The default message queue software used by OpenStack. - - Rackspace Cloud Files - - Released as open source by Rackspace in 2010; the basis for - Object Storage. - - RADOS Block Device (RBD) - - Ceph component that enables a Linux block device to be striped - over multiple distributed data stores. - - radvd - - The router advertisement daemon, used by the Compute VLAN - manager and FlatDHCP manager to provide routing services for VM - instances. - - rally - - OpenStack project that provides the Benchmark service. - - RAM filter - - The Compute setting that enables or disables RAM - overcommitment. - - RAM overcommit - - The ability to start new VM instances based on the actual memory - usage of a host, as opposed to basing the decision on the amount of - RAM each running instance thinks it has available. Also known as - memory overcommit. - - rate limit - - Configurable option within Object Storage to limit database - writes on a per-account and/or per-container basis. - - raw - - One of the VM image disk formats supported by Image service; an - unstructured disk image. 
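The rate limit entry above caps writes per account or container. A token bucket is one conventional way to implement such a cap; a small sketch with arbitrary limits:

.. code-block:: python

   import time

   class TokenBucket:
       """Allow up to `rate` operations per second with bursts of `burst`."""

       def __init__(self, rate, burst):
           self.rate, self.burst = rate, burst
           self.tokens, self.stamp = burst, time.monotonic()

       def allow(self):
           now = time.monotonic()
           # Refill tokens for the time elapsed, capped at the burst size.
           self.tokens = min(self.burst,
                             self.tokens + (now - self.stamp) * self.rate)
           self.stamp = now
           if self.tokens >= 1:
               self.tokens -= 1
               return True
           return False

   bucket = TokenBucket(rate=5, burst=10)
   print(sum(bucket.allow() for _ in range(20)))  # roughly the burst size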
- - rebalance - - The process of distributing Object Storage partitions across all drives in the ring; used during initial ring creation and after ring reconfiguration. - - reboot - - Either a soft or hard reboot of a server. With a soft reboot, the operating system is signaled to restart, which enables a graceful shutdown of all processes. A hard reboot is the equivalent of power cycling the server. The virtualization platform should ensure that the reboot action has completed successfully, even in cases in which the underlying domain/VM is paused or halted/stopped. - - rebuild - - Removes all data on the server and replaces it with the specified image. Server ID and IP addresses remain the same. - - Recon - - An Object Storage component that collects meters. - - record - - Belongs to a particular domain and is used to specify information about the domain. There are several types of DNS records. Each record type contains particular information used to describe the purpose of that record. Examples include mail exchange (MX) records, which specify the mail server for a particular domain; and name server (NS) records, which specify the authoritative name servers for a domain. - - record ID - - A number within a database that is incremented each time a change is made. Used by Object Storage when replicating. - - Red Hat Enterprise Linux (RHEL) - - A Linux distribution that is compatible with OpenStack. - - reference architecture - - A recommended architecture for an OpenStack cloud. - - region - - A discrete OpenStack environment with dedicated API endpoints that typically shares only the Identity service (keystone) with other regions. - - registry - - Alternative term for the Image service registry. - - registry server - - An Image service that provides VM image metadata information to clients. - - Reliable, Autonomic Distributed Object Store (RADOS) - - A collection of components that provides object storage within Ceph. Similar to OpenStack Object Storage. - - Remote Procedure Call (RPC) - - The method used by the Compute RabbitMQ for intra-service communications. - - replica - - Provides data redundancy and fault tolerance by creating copies of Object Storage objects, accounts, and containers so that they are not lost when the underlying storage fails. - - replica count - - The number of replicas of the data in an Object Storage ring. - - replication - - The process of copying data to a separate physical device for fault tolerance and performance. - - replicator - - The Object Storage back-end process that creates and manages object replicas. - - request ID - - Unique ID assigned to each request sent to Compute. - - rescue image - - A special type of VM image that is booted when an instance is placed into rescue mode. Allows an administrator to mount the file systems for an instance to correct the problem. - - resize - - Converts an existing server to a different flavor, which scales the server up or down. The original server is saved to enable rollback if a problem occurs. All resizes must be tested and explicitly confirmed, at which time the original server is removed. - - RESTful - - A kind of web service API that uses REST, or Representational State Transfer. REST is the style of architecture for hypermedia systems that is used for the World Wide Web. - - ring - - An entity that maps Object Storage data to partitions. A separate ring exists for each service, such as account, object, and container.
- - ring builder - - Builds and manages rings within Object Storage, assigns partitions to devices, and pushes the configuration to other storage nodes. - - Role Based Access Control (RBAC) - - Provides a predefined list of actions that the user can perform, such as start or stop VMs, reset passwords, and so on. Supported in both Identity and Compute and can be configured using the horizon dashboard. - - role - - A personality that a user assumes to perform a specific set of operations. A role includes a set of rights and privileges. A user assuming that role inherits those rights and privileges. - - role ID - - Alphanumeric ID assigned to each Identity service role. - - rootwrap - - A feature of Compute that allows the unprivileged "nova" user to run a specified list of commands as the Linux root user. - - round-robin scheduler - - Type of Compute scheduler that evenly distributes instances among available hosts (see the sketch below). - - router - - A physical or virtual network device that passes network traffic between different networks. - - routing key - - The Compute direct exchanges, fanout exchanges, and topic exchanges use this key to determine how to process a message; processing varies depending on exchange type. - - RPC driver - - Modular system that allows the underlying message queue software of Compute to be changed. For example, from RabbitMQ to ZeroMQ or Qpid. - - rsync - - Used by Object Storage to push object replicas. - - RXTX cap - - Absolute limit on the amount of network traffic a Compute VM instance can send and receive. - - RXTX quota - - Soft limit on the amount of network traffic a Compute VM instance can send and receive. - - S3 - - Object storage service by Amazon; similar in function to Object Storage, it can act as a back-end store for Image service VM images. - - sahara - - OpenStack project that provides a scalable data-processing stack and associated management interfaces. - - SAML assertion - - Contains information about a user as provided by the identity provider. It is an indication that a user has been authenticated. - - scheduler manager - - A Compute component that determines where VM instances should start. Uses modular design to support a variety of scheduler types. - - scoped token - - An Identity service API access token that is associated with a specific tenant. - - scrubber - - Checks for and deletes unused VMs; the component of Image service that implements delayed delete. - - secret key - - String of text known only by the user; used along with an access key to make requests to the Compute API. - - secure shell (SSH) - - Open source tool used to access remote hosts through an encrypted communications channel; SSH key injection is supported by Compute. - - security group - - A set of network traffic filtering rules that are applied to a Compute instance. - - segmented object - - An Object Storage large object that has been broken up into pieces. The re-assembled object is called a concatenated object. - - self-service - - For IaaS, ability for a regular (non-privileged) account to manage a virtual infrastructure component such as networks without involving an administrator. - - SELinux - - Linux kernel security module that provides the mechanism for supporting access control policies. - - senlin - - OpenStack project that provides a Clustering service.
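The round-robin scheduler entry above distributes instances evenly across hosts; the core of that policy fits in a few lines (host names invented):

.. code-block:: python

   import itertools

   hosts = itertools.cycle(['node1', 'node2', 'node3'])

   def schedule(instances):
       """Assign each instance to the next host in rotation."""
       return {inst: next(hosts) for inst in instances}

   print(schedule(['vm-a', 'vm-b', 'vm-c', 'vm-d']))
   # vm-d wraps around to node1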
- - server - - Computer that provides explicit services to the client software running on that system, often managing a variety of computer operations. A server is a VM instance in the Compute system. Flavor and image are requisite elements when creating a server. - - server image - - Alternative term for a VM image. - - server UUID - - Unique ID assigned to each guest VM instance. - - service - - An OpenStack service, such as Compute, Object Storage, or Image service. Provides one or more endpoints through which users can access resources and perform operations. - - service catalog - - Alternative term for the Identity service catalog. - - service ID - - Unique ID assigned to each service that is available in the Identity service catalog. - - Service Level Agreement (SLA) - - Contractual obligations that ensure the availability of a service. - - service provider - - A system that provides services to other system entities. In case of federated identity, OpenStack Identity is the service provider. - - service registration - - An Identity service feature that enables services, such as Compute, to automatically register with the catalog. - - service tenant - - Special tenant that contains all services that are listed in the catalog. - - service token - - An administrator-defined token used by Compute to communicate securely with the Identity service. - - session back end - - The method of storage used by horizon to track client sessions, such as local memory, cookies, a database, or memcached. - - session persistence - - A feature of the load-balancing service. It attempts to force subsequent connections to a service to be redirected to the same node as long as it is online. - - session storage - - A horizon component that stores and tracks client session information. Implemented through the Django sessions framework. - - share - - A remote, mountable file system in the context of the Shared File Systems service. You can mount a share to, and access a share from, several hosts by several users at a time. - - share network - - An entity in the context of the Shared File Systems that encapsulates interaction with the Networking service. If the selected driver runs in a mode that requires this kind of interaction, you must specify the share network to create a share. - - Shared File Systems API - - A Shared File Systems service that provides a stable RESTful API. The service authenticates and routes requests throughout the Shared File Systems service. You can use python-manilaclient to interact with the API. - - Shared File Systems service - - An OpenStack service that provides a set of services for management of shared file systems in a multi-tenant cloud environment. The service is similar to how OpenStack provides block-based storage management through the OpenStack Block Storage service project. With the Shared File Systems service, you can create a remote file system and mount the file system on your instances. You can also read and write data from your instances to and from your file system. The project name of the Shared File Systems service is manila. - - shared IP address - - An IP address that can be assigned to a VM instance within the shared IP group. Public IP addresses can be shared across multiple servers for use in various high-availability scenarios. When an IP address is shared to another server, the cloud network restrictions are modified to enable each server to listen to and respond on that IP address. You can optionally specify that the target server network configuration be modified.
Shared IP addresses can be used with many standard heartbeat facilities, such as keepalive, that monitor for failure and manage IP failover. - - shared IP group - - A collection of servers that can share IPs with other members of the group. Any server in a group can share one or more public IPs with any other server in the group. With the exception of the first server in a shared IP group, servers must be launched into shared IP groups. A server may be a member of only one shared IP group. - - shared storage - - Block storage that is simultaneously accessible by multiple clients, for example, NFS. - - Sheepdog - - Distributed block storage system for QEMU, supported by OpenStack. - - Simple Cloud Identity Management (SCIM) - - Specification for managing identity in the cloud, currently unsupported by OpenStack. - - Single-root I/O Virtualization (SR-IOV) - - A specification that, when implemented by a physical PCIe device, enables it to appear as multiple separate PCIe devices. This enables multiple virtualized guests to share direct access to the physical device, offering improved performance over an equivalent virtual device. Currently supported in OpenStack Havana and later releases. - - SmokeStack - - Runs automated tests against the core OpenStack API; written in Rails. - - snapshot - - A point-in-time copy of an OpenStack storage volume or image. Use storage volume snapshots to back up volumes. Use image snapshots to back up data, or as "gold" images for additional servers. - - soft reboot - - A controlled reboot where a VM instance is properly restarted through operating system commands. - - Software Development Lifecycle Automation service - - OpenStack project that aims to make cloud services easier to consume and integrate with the application development process by automating the source-to-image process, and simplifying app-centric deployment. The project name is solum. - - SolidFire Volume Driver - - The Block Storage driver for the SolidFire iSCSI storage appliance. - - solum - - OpenStack project that provides a Software Development Lifecycle Automation service. - - SPICE - - The Simple Protocol for Independent Computing Environments (SPICE) provides remote desktop access to guest virtual machines. It is an alternative to VNC. SPICE is supported by OpenStack. - - spread-first scheduler - - The Compute VM scheduling algorithm that attempts to start a new VM on the host with the least amount of load (see the sketch below). - - SQLAlchemy - - An open source SQL toolkit for Python, used in OpenStack. - - SQLite - - A lightweight SQL database, used as the default persistent storage method in many OpenStack services. - - stack - - A set of OpenStack resources created and managed by the Orchestration service according to a given template (either an AWS CloudFormation template or a Heat Orchestration Template (HOT)). - - StackTach - - Community project that captures Compute AMQP communications; useful for debugging. - - static IP address - - Alternative term for a fixed IP address. - - StaticWeb - - WSGI middleware component of Object Storage that serves container data as a static web page. - - storage back end - - The method that a service uses for persistent storage, such as iSCSI, NFS, or local disk.
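The spread-first scheduler entry above is the counterpart of the fill-first scheduler defined earlier: place the new VM on the least-loaded host. A sketch, with load as an abstract number:

.. code-block:: python

   def spread_first(hosts):
       """Pick the host with the least load."""
       return min(hosts, key=lambda h: h['load'])

   hosts = [
       {'name': 'node1', 'load': 0.7},
       {'name': 'node2', 'load': 0.2},
       {'name': 'node3', 'load': 0.5},
   ]
   print(spread_first(hosts)['name'])  # node2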
- - storage manager - - A XenAPI component that provides a pluggable interface to support a wide variety of persistent storage back ends. - - storage manager back end - - A persistent storage method supported by XenAPI, such as iSCSI or NFS. - - storage node - - An Object Storage node that provides container services, account services, and object services; controls the account databases, container databases, and object storage. - - storage services - - Collective name for the Object Storage object services, container services, and account services. - - strategy - - Specifies the authentication source used by Image service or Identity. In the Database service, it refers to the extensions implemented for a data store. - - subdomain - - A domain within a parent domain. Subdomains cannot be registered. Subdomains enable you to delegate domains. Subdomains can themselves have subdomains, so third-level, fourth-level, fifth-level, and deeper levels of nesting are possible. - - subnet - - Logical subdivision of an IP network. - - SUSE Linux Enterprise Server (SLES) - - A Linux distribution that is compatible with OpenStack. - - suspend - - Alternative term for a paused VM instance. - - swap - - Disk-based virtual memory used by operating systems to provide more memory than is actually available on the system. - - swauth - - An authentication and authorization service for Object Storage, implemented through WSGI middleware; uses Object Storage itself as the persistent backing store. - - swift - - An OpenStack core project that provides object storage services. - - swift All in One (SAIO) - - Creates a full Object Storage development environment within a single VM. - - swift middleware - - Collective term for Object Storage components that provide additional functionality. - - swift proxy server - - Acts as the gatekeeper to Object Storage and is responsible for authenticating the user. - - swift storage node - - A node that runs Object Storage account, container, and object services. - - sync point - - Point in time since the last container and accounts database sync among nodes within Object Storage. - - sysadmin - - One of the default roles in the Compute RBAC system. Enables a user to add other users to a project, interact with VM images that are associated with the project, and start and stop VM instances. - - system usage - - A Compute component that, along with the notification system, collects meters and usage information. This information can be used for billing. - - Telemetry service - - An integrated project that provides metering and measuring facilities for OpenStack. The project name of Telemetry is ceilometer. - - TempAuth - - An authentication facility within Object Storage that enables Object Storage itself to perform authentication and authorization. Frequently used in testing and development. - - Tempest - - Automated software test suite designed to run against the trunk of the OpenStack core project. - - TempURL - - An Object Storage middleware component that enables creation of URLs for temporary object access (see the sketch below). - - tenant - - A group of users; used to isolate access to Compute resources. An alternative term for a project. - - Tenant API - - An API that is accessible to tenants. - - tenant endpoint - - An Identity service API endpoint that is associated with one or more tenants. - - tenant ID - - Unique ID assigned to each tenant within the Identity service. The project IDs map to the tenant IDs.
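The TempURL entry above issues time-limited URLs for object access. The widely documented scheme signs the method, expiry, and object path with HMAC-SHA1 using the account's secret key; a sketch under that assumption, with a made-up key and path:

.. code-block:: python

   import hmac
   import time
   from hashlib import sha1

   def temp_url(path, key, seconds=3600, method='GET'):
       """Build a temporary URL granting `method` access until expiry."""
       expires = int(time.time() + seconds)
       body = f'{method}\n{expires}\n{path}'
       sig = hmac.new(key, body.encode(), sha1).hexdigest()
       return f'{path}?temp_url_sig={sig}&temp_url_expires={expires}'

   print(temp_url('/v1/AUTH_demo/container/object', key=b'secret'))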
- - token - - An alphanumeric string of text used to access OpenStack APIs and resources. - - token services - - An Identity service component that manages and validates tokens after a user or tenant has been authenticated. - - tombstone - - Used to mark Object Storage objects that have been deleted; ensures that the object is not updated on another node after it has been deleted. - - topic publisher - - A process that is created when an RPC call is executed; used to push the message to the topic exchange. - - Torpedo - - Community project used to run automated tests against the OpenStack API. - - transaction ID - - Unique ID assigned to each Object Storage request; used for debugging and tracing. - - transient - - Alternative term for non-durable. - - transient exchange - - Alternative term for a non-durable exchange. - - transient message - - A message that is stored in memory and is lost after the server is restarted. - - transient queue - - Alternative term for a non-durable queue. - - TripleO - - OpenStack-on-OpenStack program. The code name for the OpenStack Deployment program. - - trove - - OpenStack project that provides database services to applications. - - Ubuntu - - A Debian-based Linux distribution. - - unscoped token - - Alternative term for an Identity service default token. - - updater - - Collective term for a group of Object Storage components that processes queued and failed updates for containers and objects. - - user - - In OpenStack Identity, entities represent individual API consumers and are owned by a specific domain. In OpenStack Compute, a user can be associated with roles, projects, or both. - - user data - - A blob of data that the user can specify when they launch an instance. The instance can access this data through the metadata service or config drive. Commonly used to pass a shell script that the instance runs on boot (see the sketch below). - - User Mode Linux (UML) - - An OpenStack-supported hypervisor. - - VIF UUID - - Unique ID assigned to each Networking VIF. - - VIP - - The primary load balancing configuration object. Specifies the virtual IP address and port where client traffic is received. Also defines other details such as the load balancing method to be used, protocol, and so on. This entity is sometimes known in load-balancing products as a virtual server, vserver, or listener. - - Virtual Central Processing Unit (vCPU) - - Subdivides physical CPUs. Instances can then use those divisions. - - Virtual Disk Image (VDI) - - One of the VM image disk formats supported by Image service. - - Virtual Extensible LAN (VXLAN) - - A network virtualization technology that attempts to reduce the scalability problems associated with large cloud computing deployments. It uses a VLAN-like encapsulation technique to encapsulate Ethernet frames within UDP packets. - - Virtual Hard Disk (VHD) - - One of the VM image disk formats supported by Image service. - - virtual IP - - An Internet Protocol (IP) address configured on the load balancer for use by clients connecting to a service that is load balanced. Incoming connections are distributed to back-end nodes based on the configuration of the load balancer. - - virtual machine (VM) - - An operating system instance that runs on top of a hypervisor. Multiple VMs can run at the same time on the same physical host. - - virtual network - - An L2 network segment within Networking.
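The user data entry above notes that an instance can read its user data back through the metadata service. From inside a guest that is a plain HTTP fetch; the link-local address below is the real metadata endpoint, though it only resolves from within an instance (error handling omitted):

.. code-block:: python

   import urllib.request

   # Well-known metadata service address; reachable only inside a guest.
   URL = 'http://169.254.169.254/openstack/latest/user_data'

   with urllib.request.urlopen(URL, timeout=5) as resp:
       print(resp.read().decode())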
- - virtual networking - - A generic term for virtualization of network functions - such as switching, routing, load balancing, and security using - a combination of VMs and overlays on physical network - infrastructure. - - Virtual Network Computing (VNC) - - Open source GUI and CLI tools used for remote console access to - VMs. Supported by Compute. - - Virtual Network InterFace (VIF) - - An interface that is plugged into a port in a Networking - network. Typically a virtual network interface belonging to a - VM. - - virtual port - - Attachment point where a virtual interface connects to a virtual - network. - - virtual private network (VPN) - - Provided by Compute in the form of cloudpipes, specialized - instances that are used to create VPNs on a per-project basis. - - virtual server - - Alternative term for a VM or guest. - - virtual switch (vSwitch) - - Software that runs on a host or node and provides the features - and functions of a hardware-based network switch. - - virtual VLAN - - Alternative term for a virtual network. - - VirtualBox - - An OpenStack-supported hypervisor. - - VLAN manager - - A Compute component that provides dnsmasq and radvd and sets up - forwarding to and from cloudpipe instances. - - VLAN network - - The Network Controller provides virtual networks to enable - compute servers to interact with each other and with the public - network. All machines must have a public and private network - interface. A VLAN network is a private network interface, which is - controlled by the ``vlan_interface`` option with VLAN - managers. - - VM disk (VMDK) - - One of the VM image disk formats supported by Image - service. - - VM image - - Alternative term for an image. - - VM Remote Control (VMRC) - - Method to access VM instance consoles using a web browser. - Supported by Compute. - - VMware API - - Supports interaction with VMware products in Compute. - - VMware NSX Neutron plug-in - - Provides support for VMware NSX in Neutron. - - VNC proxy - - A Compute component that provides users access to the consoles - of their VM instances through VNC or VMRC. - - volume - - Disk-based data storage generally represented as an iSCSI target - with a file system that supports extended attributes; can be - persistent or ephemeral. - - Volume API - - Alternative name for the Block Storage API. - - volume controller - - A Block Storage component that oversees and coordinates storage - volume actions. - - volume driver - - Alternative term for a volume plug-in. - - volume ID - - Unique ID applied to each storage volume under the Block Storage - control. - - volume manager - - A Block Storage component that creates, attaches, and detaches - persistent storage volumes. - - volume node - - A Block Storage node that runs the cinder-volume daemon. - - volume plug-in - - Provides support for new and specialized types of back-end - storage for the Block Storage volume manager. - - volume worker - - A cinder component that interacts with back-end storage to manage - the creation and deletion of volumes and the creation of compute - volumes, provided by the cinder-volume daemon. - - vSphere - - An OpenStack-supported hypervisor. - - weighting - - A Compute process that determines the suitability of the VM - instances for a job for a particular host. For example, not enough RAM - on the host, too many CPUs on the host, and so on. - - weight - - Used by Object Storage devices to determine which storage - devices are suitable for the job. Devices are weighted by size. 
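A rough sketch of the ``weighting`` idea described above, with hypothetical host records standing in for the scheduler's back-end data; the real Compute scheduler is considerably more involved::

    # Hypothetical host records; Compute reads the real values (free RAM,
    # current workload, running VMs) from its back-end database.
    hosts = [
        {"name": "node1", "free_ram_mb": 2048},
        {"name": "node2", "free_ram_mb": 8192},
        {"name": "node3", "free_ram_mb": 512},
    ]

    def weigh(host, required_ram_mb):
        """Score a host for a job; return None if it is unsuitable."""
        if host["free_ram_mb"] < required_ram_mb:
            return None  # e.g. not enough RAM on the host
        return host["free_ram_mb"]  # more headroom -> higher weight

    candidates = [(weigh(h, 1024), h) for h in hosts]
    candidates = [(w, h) for w, h in candidates if w is not None]
    best = max(candidates, key=lambda pair: pair[0])[1]
    print("start the instance on", best["name"])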
- - weighted cost - - The sum of each cost used when deciding where to start a new VM - instance in Compute. - - worker - - A daemon that listens to a queue and carries out tasks in - response to messages. For example, the cinder-volume worker manages volume - creation and deletion on storage arrays. - - Workflow service - - OpenStack project that provides a simple YAML-based language - to write workflows, tasks, and transition rules, and a - service that allows you to upload, modify, and run them at - scale and in a highly available manner, and to manage and monitor - workflow execution state and the state of individual tasks. The - code name of the project is mistral. - - Xen - - Xen is a hypervisor using a microkernel design, providing - services that allow multiple computer operating systems to - execute on the same computer hardware concurrently. - - Xen API - - The Xen administrative API, which is supported by - Compute. - - Xen Cloud Platform (XCP) - - An OpenStack-supported hypervisor. - - Xen Storage Manager Volume Driver - - A Block Storage volume plug-in that enables communication with - the Xen Storage Manager API. - - XenServer - - An OpenStack-supported hypervisor. - - XFS - - High-performance 64-bit file system created by Silicon - Graphics. Excels in parallel I/O operations and data - consistency. - - zaqar - - OpenStack project that provides a message service to - applications. - - ZeroMQ - - Message queue software supported by OpenStack. An alternative to - RabbitMQ. Also spelled 0MQ. - - Zuul - - Tool used in OpenStack development to ensure correctly ordered - testing of changes in parallel. diff --git a/doc/common/source/locale/ja/LC_MESSAGES/common.po b/doc/common/source/locale/ja/LC_MESSAGES/common.po deleted file mode 100644 index c696f871..00000000 --- a/doc/common/source/locale/ja/LC_MESSAGES/common.po +++ /dev/null @@ -1,10567 +0,0 @@ -# Translators: -# Akihiro Motoki , 2013 -# Andreas Jaeger , 2014 -# myamamot , 2014 -# nao nishijima , 2015 -# Tomoyuki KATO , 2015 -# yfukuda , 2014 -# ykatabam , 2014 -# -# -# Akihiro Motoki , 2016. #zanata -# Andreas Jaeger , 2016. #zanata -# KATO Tomoyuki , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: Common documents 1.0.0\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2016-05-04 01:03+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-19 01:30+0000\n" -"Last-Translator: KATO Tomoyuki \n" -"Language: ja\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Japanese\n" - -msgid "(RADOS)" -msgstr "(RADOS)" - -msgid "" -"**CentOS, Fedora, and Red Hat Enterprise Linux:** https://www.rdoproject.org/" -msgstr "" -"**CentOS、Fedora、Red Hat Enterprise Linux:** https://www.rdoproject.org/" - -msgid "**Debian:** https://wiki.debian.org/OpenStack" -msgstr "**Debian:** https://wiki.debian.org/OpenStack" - -msgid "**Disk and CD-ROM bus model values**" -msgstr "**ディスクと CD-ROM のバスモデルの値**" - -msgid "**MacOS**" -msgstr "**MacOS**" - -msgid "**Microsoft Windows**" -msgstr "**Microsoft Windows**" - -msgid "**Process flow example**" -msgstr "**プロセスフロー例**" - -msgid "**Red Hat Enterprise Linux, CentOS, or Fedora.**" -msgstr "**Red Hat Enterprise Linux、CentOS、Fedora。**" - -msgid "**SUSE Linux Enterprise Server**" -msgstr "**SUSE Linux Enterprise Server**" - -msgid "**Ubuntu or Debian**" -msgstr "**Ubuntu または Debian**" - -msgid "**Ubuntu:** https://wiki.ubuntu.com/ServerTeam/CloudArchive" -msgstr "**Ubuntu:** https://wiki.ubuntu.com/ServerTeam/CloudArchive" - -msgid "**VIF model values**" -msgstr "**仮想インターフェースのモデルの値**" - -msgid "" -"**openSUSE and SUSE Linux Enterprise Server:** https://en.opensuse.org/" -"Portal:OpenStack" -msgstr "" -"**openSUSE、SUSE Linux Enterprise Server:** https://en.opensuse.org/Portal:" -"OpenStack" - -msgid "**openSUSE**" -msgstr "**openSUSE**" - -msgid "" -"*The OpenStack Image service is used to manage the virtual machine images in " -"an OpenStack cluster, not store them.* It provides an abstraction to " -"different methods for storage - a bridge to the storage, not the storage " -"itself." -msgstr "" -"*OpenStack Image service は、OpenStack クラスターにおける仮想マシンイメージを" -"管理するために使用されます。それらを保存するためではありません。* さまざまな" -"種類のストレージを抽象化します。ストレージへの架け橋となりますが、ストレージ" -"ではありません。" - -msgid "" -"*The OpenStack Object Storage can function on its own.* The Object Storage " -"(swift) product can be used independently of the Compute (nova) product." -msgstr "" -"*OpenStack Object Storage は、単体で機能できます。* Object Storage (swift) 製" -"品は、Compute (nova) 製品と独立して使用できます。" - -msgid "" -"*You cannot use OpenStack Object Storage like a traditional hard drive.* The " -"Object Storage relaxes some of the constraints of a POSIX-style file system " -"to get other gains. You can access the objects through an API which uses " -"HTTP. Subsequently you don't have to provide atomic operations (that is, " -"relying on eventual consistency), you can scale a storage system easily and " -"avoid a central point of failure." 
-msgstr "" -"*OpenStack Object Storage は従来のハードディスクのように使用する事が出来ませ" -"ん。* Object Storage は他のメリットを得るために、POSIX 形式のファイルシステム" -"の制約を緩和しています。オブジェクトには HTTP を使用する API 経由でアクセス出" -"来ます。また、Object Storage はオブジェクトに対するアトミックな操作を提供しな" -"い(=結果整合性(Eventual Consistency)に依る)ので、ストレージシステムを簡" -"単にスケールアウトでき、単一点障害を避ける事ができます。" - -msgid "6to4" -msgstr "6to4" - -msgid ":option:`--description`" -msgstr ":option:`--description`" - -msgid ":option:`--id-type`" -msgstr ":option:`--id-type`" - -msgid ":option:`--metadata`" -msgstr ":option:`--metadata`" - -msgid ":option:`--name`" -msgstr ":option:`--name`" - -msgid "A BLOB of data held by Object Storage; can be in any format." -msgstr "" -"Object Storage により保持されるデータの BLOB。あらゆる形式の可能性がある。" - -msgid "" -"A Block Storage component that creates, attaches, and detaches persistent " -"storage volumes." -msgstr "" -"永続ストレージボリュームを作成、接続、切断する Block Storage コンポーネント。" - -msgid "" -"A Block Storage component that oversees and coordinates storage volume " -"actions." -msgstr "" -"ストレージボリュームの操作を監督、調整する、Block Storage のコンポーネント。" - -msgid "A Block Storage node that runs the cinder-volume daemon." -msgstr "cinder-volume デーモンを実行する Block Storage ノード。" - -msgid "" -"A Block Storage volume plug-in that enables communication with the Xen " -"Storage Manager API." -msgstr "" -"Xen Storage Manager API と通信できる Block Storage ボリュームプラグイン。" - -msgid "" -"A CLI that communicates with the ``heat-api`` to run :term:`AWS` " -"CloudFormation APIs. End developers can directly use the Orchestration REST " -"API." -msgstr "" -":term:`AWS` CloudFormation API を実行するために、 ``heat-api`` と通信する " -"CLI。エンドの開発者は直接 Orchestration REST API を使用することもできます。" - -msgid "A CLI that communicates with the ``trove-api`` component." -msgstr "``trove-api`` コンポーネントと通信する CLI。" - -msgid "" -"A Ceph component that communicates with external clients, checks data state " -"and consistency, and performs quorum functions." -msgstr "" -"外部クライアントと通信し、データの状態と整合性を確認し、クォーラム機能を実行" -"する、Ceph コンポーネント。" - -msgid "" -"A Compute API parameter that downloads changes to the requested item since " -"your last request, instead of downloading a new, fresh set of data and " -"comparing it against the old data." -msgstr "" -"Compute API のパラメーター。古いデータと比較するために、新しいデータ群をダウ" -"ンロードする代わりに、最後に要求した後に実行された、要求した項目への変更をダ" -"ウンロードする。" - -msgid "" -"A Compute RabbitMQ message queue that remains active when the server " -"restarts." -msgstr "" -"サーバーの再起動時に有効なままとなる、Compute RabbitMQ メッセージキュー。" - -msgid "" -"A Compute RabbitMQ setting that determines whether a message exchange is " -"automatically created when the program starts." -msgstr "" -"メッセージ交換がプログラム起動時に自動的に作成されるかどうかを決める、" -"Compute の RabbitMQ の設定。" - -msgid "" -"A Compute back-end database table that contains the current workload, amount " -"of free RAM, and number of VMs running on each host. Used to determine on " -"which host a VM starts." -msgstr "" -"Computeバックエンドデータベースのテーブルには現在のワークロード、RAMの空き" -"量、各ホストで起動しているVMの数が含まれている。VMがどのホストで開始するのか" -"を決めるのに利用される。" - -msgid "" -"A Compute component that determines where VM instances should start. Uses " -"modular design to support a variety of scheduler types." -msgstr "" -"仮想マシンインスタンスが起動する場所を決める、Compute のコンポーネント。さま" -"ざまな種類のスケジューラーをサポートするために、モジュール型設計を使用する。" - -msgid "" -"A Compute component that enables OpenStack to communicate with Amazon EC2." -msgstr "" -"OpenStack が Amazon EC2 を利用できるようにするための Compute のコンポーネン" -"ト。" - -msgid "" -"A Compute component that manages IP address allocation, firewalls, and other " -"network-related tasks. 
This is the legacy networking option and an " -"alternative to Networking." -msgstr "" -"IP アドレス割り当て、ファイアウォール、その他ネットワーク関連タスクを管理す" -"る Compute のコンポーネント。レガシーネットワークのオプション。Networking の" -"代替。" - -msgid "" -"A Compute component that provides dnsmasq and radvd and sets up forwarding " -"to and from cloudpipe instances." -msgstr "" -"dnsmasq と radvd を提供し、cloudpipe インスタンスとの転送処理をセットアップす" -"る、Compute のコンポーネント。" - -msgid "" -"A Compute component that provides users access to the consoles of their VM " -"instances through VNC or VMRC." -msgstr "" -"ユーザーが VNC や VMRC 経由で仮想マシンインスタンスのコンソールにアクセスでき" -"るようにする Compute のコンポーネント。" - -msgid "" -"A Compute component that, along with the notification system, collects " -"meters and usage information. This information can be used for billing." -msgstr "" -"通知システムと一緒に動作し、計測項目と使用状況を収集する、Compute のコンポー" -"ネント。この情報は課金のために使用できる。" - -msgid "" -"A Compute daemon that orchestrates the network configuration of nodes, " -"including IP addresses, VLANs, and bridging. Also manages routing for both " -"public and private networks." -msgstr "" -"IP アドレス、VLAN、ブリッジなど、ノードのネットワーク設定をオーケストレーショ" -"ンする Compute のデーモン。また、パブリックネットワークとプライベートネット" -"ワークのルーティングを管理する。" - -msgid "" -"A Compute networking method where the OS network configuration information " -"is injected into the VM image before the instance starts." -msgstr "" -"インスタンスの起動前に、OS のネットワーク設定情報を仮想マシンイメージ内に注入" -"する、Compute のネットワーク方式。" - -msgid "" -"A Compute option that enables parent cells to pass resource requests to " -"child cells if the parent cannot provide the requested resource." -msgstr "" -"親が要求されたリソースを提供できない場合、親セルがリソース要求を子セルに渡す" -"事を可能にする Compute のオプション。" - -msgid "" -"A Compute process that determines the suitability of the VM instances for a " -"job for a particular host. For example, not enough RAM on the host, too many " -"CPUs on the host, and so on." -msgstr "" -"特定のホストがあるジョブ向けの仮想マシンインスタンスに対して適切かどうかを判" -"断する、Compute の処理。例えば、ホストのメモリー不足、ホストの CPU 過剰など。" - -msgid "A Debian-based Linux distribution." -msgstr "Debian ベースの Linux ディストリビューション。" - -msgid "A Java program that can be embedded into a web page." -msgstr "Web ページの中に組み込める Java プログラム。" - -msgid "A Linux distribution compatible with OpenStack." -msgstr "OpenStack と互換性のある Linux ディストリビューション。" - -msgid "A Linux distribution that is compatible with OpenStack." -msgstr "OpenStack と互換性のある Linux ディストリビューション。" - -msgid "A Networking extension that provides perimeter firewall functionality." -msgstr "境界ファイアウォール機能を提供する Networking 拡張。" - -msgid "" -"A Networking plug-in for Cisco devices and technologies, including UCS and " -"Nexus." -msgstr "UCS や Nexus などの Cisco デバイスや技術の Networking プラグイン。" - -msgid "" -"A SQLite database that contains Object Storage accounts and related metadata " -"and that the accounts server accesses." -msgstr "" -"Object Storage のアカウントと関連メタデータを保持し、アカウントサーバーがアク" -"セスする、SQLite データベース。" - -msgid "" -"A SQLite database that stores Object Storage containers and container " -"metadata. The container server accesses this database." -msgstr "" -"Object Storage コンテナーとコンテナーメタデータを保存する SQLite データベー" -"ス。コンテナーサーバーは、このデータベースにアクセスする。" - -msgid "" -"A Shared File Systems service that provides a stable RESTful API. The " -"service authenticates and routes requests throughout the Shared File Systems " -"service. There is python-manilaclient to interact with the API." 
-msgstr "" -"安定版の RESTful API を提供する Shared File Systems サービス。 Shared File " -"Systems サービスへのすべてのリクエストの認証と転送を行う。この API と通信する" -"ための python-manilaclient が提供されています。" - -msgid "" -"A VM image that does not save changes made to its volumes and reverts them " -"to their original state after the instance is terminated." -msgstr "" -"ボリュームへの変更が保存されない仮想マシンイメージ。インスタンスの終了後、元" -"の状態に戻される。" - -msgid "A VM instance that runs on a host." -msgstr "ホストで動作する仮想マシンインスタンス。" - -msgid "" -"A VM state where no changes occur (no changes in memory, network " -"communications stop, etc); the VM is frozen but not shut down." -msgstr "" -"変更が発生しない (メモリーの変更なし、ネットワーク通信の停止など)、仮想マシン" -"の状態。仮想マシンは停止するが、シャットダウンしない。" - -msgid "" -"A Windows project providing guest initialization features, similar to cloud-" -"init." -msgstr "cloud-init 同様のゲスト初期化機能を提供する Windows プロジェクト。" - -msgid "" -"A XenAPI component that provides a pluggable interface to support a wide " -"variety of persistent storage back ends." -msgstr "" -"さまざまな種類の永続ストレージバックエンドをサポートするために、プラグイン可" -"能なインターフェースを提供する XenAPI コンポーネント。" - -msgid "" -"A bit is a single digit number that is in base of 2 (either a zero or one). " -"Bandwidth usage is measured in bits per second." -msgstr "" -"ビットは、2 を基数とする単一のデジタル数値 (0 または 1)。帯域使用量は、ビット" -"毎秒 (bps) で計測される。" - -msgid "" -"A blob of data that the user can specify when they launch an instance. The " -"instance can access this data through the metadata service or config drive. " -"Commonly used to pass a shell script that the instance runs on boot." -msgstr "" -"インスタンス起動時にユーザが指定できる BLOB データ。インスタンスはこのデータ" -"にメタデータサービスやコンフィグドライブ経由でアクセスできる。通常、インスタ" -"ンスがブート時に実行するシェルスクリプトを渡すために使用される。" - -msgid "A central agent (``ceilometer-agent-central``)" -msgstr "中央エージェント (``ceilometer-agent-central``)" - -msgid "" -"A centralized server provides authentication and authorization services " -"using a RESTful interface." -msgstr "" -"中央サーバーが、RESTful インターフェースを使用して、認証と認可のサービスを提" -"供します。" - -msgid "" -"A cinder component that interacts with back-end storage to manage the " -"creation and deletion of volumes and the creation of compute volumes, " -"provided by the cinder-volume daemon." -msgstr "" -"ボリュームの作成や削除、コンピュートボリュームの作成を管理するために、バック" -"エンドのストレージと相互作用する cinder のコンポーネント。cinder-volume デー" -"モンにより提供される。" - -msgid "" -"A cli tool used to retrieve various metrics and telemetry information about " -"a cluster that has been collected by the swift-recon middleware." -msgstr "" -"swift-recon ミドルウェアにより収集されたクラスターの様々な性能情報や統計情報" -"を取得するために使用する CLI ツール。" - -msgid "" -"A collection of command-line tools for administering VMs; most are " -"compatible with OpenStack." -msgstr "" -"仮想マシンを管理するためのコマンドラインツール群。ほとんどは OpenStack と互換" -"性がある。" - -msgid "" -"A collection of components that provides object storage within Ceph. Similar " -"to OpenStack Object Storage." -msgstr "" -"Ceph 内にオブジェクトストレージを提供するコンポーネント群。OpenStack Object " -"Storage に似ている。" - -msgid "" -"A collection of files for a specific operating system (OS) that you use to " -"create or rebuild a server. OpenStack provides pre-built images. You can " -"also create custom images, or snapshots, from servers that you have " -"launched. Custom images can be used for data backups or as \"gold\" images " -"for additional servers." -msgstr "" -"サーバーの作成、再構築に使用する特定のオペレーティングシステム(OS)用のファ" -"イルの集合。OpenStack は構築済みイメージを提供する。起動したサーバーからカス" -"タムイメージ(またはスナップショット)を作成できる。" - -msgid "A collection of hypervisors grouped together through host aggregates." 
-msgstr "ホストアグリゲートにより一緒にグループ化されたハイパーバイザーの集合。" - -msgid "" -"A collection of servers that can share IPs with other members of the group. " -"Any server in a group can share one or more public IPs with any other server " -"in the group. With the exception of the first server in a shared IP group, " -"servers must be launched into shared IP groups. A server may be a member of " -"only one shared IP group." -msgstr "" -"グループの他のメンバーと IP を共有できるサーバー群。グループ内のサーバーは、" -"そのグループ内の他のサーバーと 1 つ以上のパブリック IP を共有できる。共有 IP " -"グループにおける 1 番目のサーバーを除き、サーバーは共有 IP グループの中で起動" -"する必要がある。サーバーは、共有 IP グループ 1 つだけのメンバーになれる。" - -msgid "" -"A collection of specifications used to access a service, application, or " -"program. Includes service calls, required parameters for each call, and the " -"expected return values." -msgstr "" -"サービス、アプリケーション、プログラムへのアクセスに使用される仕様の集合。" -"サービス呼出、各呼出に必要なパラメーター、想定される戻り値を含む。" - -msgid "A collector (``ceilometer-collector``)" -msgstr "コレクター (``ceilometer-collector``)" - -msgid "A comment with additional information that explains a part of the text." -msgstr "本文を補足する説明コメントです。" - -msgid "" -"A common API for vendors, admins, services, and users to meaningfully define " -"their own custom metadata. This metadata can be used on different types of " -"resources like images, artifacts, volumes, flavors, and aggregates. A " -"definition includes the new property's key, description, constraints, and " -"the resource types which it can be associated with." -msgstr "" -"ベンダー、管理者、サービス、ユーザーの独自のメタデータを有意義に定義するため" -"の共通 API。このメタデータは、イメージ、アーティファクト、ボリューム、フレー" -"バー、アグリゲートなどの、さまざまなリソースにおいて使用できます。定義には、" -"新しいプロパティーのキー、説明、制約、それが関連付けられるリソース種別が含ま" -"れます。" - -msgid "" -"A community project may be elevated to this status and is then promoted to a " -"core project." -msgstr "" -"コミュニティプロジェクトがこの状態に昇格する事があり、その後コアプロジェクト" -"に昇格する。" - -msgid "A compute agent (``ceilometer-agent-compute``)" -msgstr "コンピュートエージェント (``ceilometer-agent-compute``)" - -msgid "A compute service that creates VPNs on a per-project basis." -msgstr "プロジェクトごとの VPN を作成するコンピュートのサービス。" - -msgid "" -"A configurable option within Object Storage to automatically delete objects " -"after a specified amount of time has passed or a certain date is reached." -msgstr "" -"指定された時間経過後、又は指定日になった際に自動的にオブジェクトを削除するた" -"めの Object Storage の設定オプション。" - -msgid "" -"A content delivery network is a specialized network that is used to " -"distribute content to clients, typically located close to the client for " -"increased performance." -msgstr "" -"コンテンツ配信ネットワークは、クライアントにコンテンツを配信するために使用さ" -"れる特別なネットワーク。一般的に、パフォーマンス改善のために、クライアントの" -"近くに置かれる。" - -msgid "" -"A controlled reboot where a VM instance is properly restarted through " -"operating system commands." -msgstr "" -"オペレーティングシステムのコマンド経由で、仮想マシンインスタンスが正常に再起" -"動する、制御された再起動。" - -msgid "" -"A core OpenStack project that provides a network connectivity abstraction " -"layer to OpenStack Compute." -msgstr "" -"OpenStack のコアプロジェクトで、OpenStack Compute に対してネットワーク接続の" -"抽象化レイヤーを提供する。" - -msgid "" -"A core OpenStack project that provides a network connectivity abstraction " -"layer to OpenStack Compute. The project name of Networking is neutron." -msgstr "" -"ネットワーク接続性の抽象化レイヤーを OpenStack Compute に提供する、OpenStack " -"コアプロジェクト。Networking のプロジェクト名は neutron。" - -msgid "A core OpenStack project that provides block storage services for VMs." -msgstr "" -"ブロックストレージサービスを仮想マシンに提供する、OpenStack のコアプロジェク" -"ト。" - -msgid "A core project that provides the OpenStack Image service." 
-msgstr "OpenStack Image service を提供するコアプロジェクト。" - -msgid "" -"A daemon that listens to a queue and carries out tasks in response to " -"messages. For example, the cinder-volume worker manages volume creation and " -"deletion on storage arrays." -msgstr "" -"キューをリッスンし、メッセージに応じたタスクを実行するデーモン。例えば、" -"cinder-volume ワーカーは、ストレージにおけるボリュームの作成と削除を管理しま" -"す。" - -msgid "A database engine supported by the Database service." -msgstr "Database サービスがサポートしているデータベースエンジン。" - -msgid "" -"A default role in the Compute RBAC system that can quarantine an instance in " -"any project." -msgstr "" -"あらゆるプロジェクトにあるインスタンスを検疫できる、Compute RBAC システムにお" -"けるデフォルトのロール。" - -msgid "" -"A device that moves data in the form of blocks. These device nodes interface " -"the devices, such as hard disks, CD-ROM drives, flash drives, and other " -"addressable regions of memory." -msgstr "" -"ブロック状態のデータを移動するデバイス。これらのデバイスノードにはハードディ" -"スク、CD-ROM ドライブ、フラッシュドライブ、その他のアドレス可能なメモリの範囲" -"等がある。" - -msgid "" -"A directory service, which allows users to login with a user name and " -"password. It is a typical source of authentication tokens." -msgstr "" -"ユーザーがユーザー名とパスワードを用いてログインできるようにする、ディレクト" -"リーサービス。認証トークンの一般的な情報源。" - -msgid "" -"A discrete OpenStack environment with dedicated API endpoints that typically " -"shares only the Identity (keystone) with other regions." -msgstr "" -"専用の API エンドポイントを持つ、分離した OpenStack 環境。一般的に Identity " -"(keystone) のみを他のリージョンと共有する。" - -msgid "A disk storage protocol tunneled within Ethernet." -msgstr "Ethernet 内をトンネルされるディスクストレージプロトコル。" - -msgid "" -"A distributed memory object caching system that is used by Object Storage " -"for caching." -msgstr "" -"Object Storage がキャッシュのために使用する、メモリーオブジェクトの分散キャッ" -"シュシステム。" - -msgid "" -"A distributed, highly fault-tolerant file system designed to run on low-cost " -"commodity hardware." -msgstr "" -"低価格のコモディティーサーバー上で動作することを念頭に設計された、耐故障性に" -"優れた分散ファイルシステム。" - -msgid "" -"A domain within a parent domain. Subdomains cannot be registered. Subdomains " -"enable you to delegate domains. Subdomains can themselves have subdomains, " -"so third-level, fourth-level, fifth-level, and deeper levels of nesting are " -"possible." -msgstr "" -"親ドメイン内のドメイン。サブドメインは登録できない。サブドメインによりドメイ" -"ンを委譲できる。サブドメインは、サブドメインを持てるので、第 3 階層、第 4 階" -"層、第 5 階層と深い階層構造にできる。" - -msgid "" -"A driver for the Modular Layer 2 (ML2) neutron plug-in that provides layer-2 " -"connectivity for virtual instances. A single OpenStack installation can use " -"multiple mechanism drivers." -msgstr "" -"仮想インスタンス向けに L2 接続性を提供する、ML2 neutron プラグイン向けのドラ" -"イバー。単一の OpenStack インストール環境が、複数のメカニズムドライバーを使用" -"できます。" - -msgid "" -"A feature of Compute that allows the unprivileged \"nova\" user to run a " -"specified list of commands as the Linux root user." -msgstr "" -"非特権の「nova」ユーザーが Linux の root ユーザーとして指定したコマンド一覧を" -"実行できるようにする、Compute の機能。" - -msgid "" -"A feature of the load-balancing service. It attempts to force subsequent " -"connections to a service to be redirected to the same node as long as it is " -"online." -msgstr "" -"負荷分散サービスの機能の 1 つ。ノードがオンラインである限り、強制的に一連の接" -"続を同じノードにリダイレクトしようとする。" - -msgid "" -"A file sharing protocol. It is a public or open variation of the original " -"Server Message Block (SMB) protocol developed and used by Microsoft. Like " -"the SMB protocol, CIFS runs at a higher level and uses the TCP/IP protocol." 
-msgstr "" -"ファイル共有プロトコル。 Microsoft が開発し使用している Server Message Block " -"(SMB) プロトコルが公開されオープンになったものです。 SMB プロトコルと同様" -"に、 CIFS は上位レイヤーで動作し、TCP/IP プロトコルを使用します。" - -msgid "" -"A file system designed to aggregate NAS hosts, compatible with OpenStack." -msgstr "" -"NAS ホストを集約するために設計されたファイルシステム。OpenStack と互換性があ" -"る。" - -msgid "" -"A file used to customize a Compute instance. It can be used to inject SSH " -"keys or a specific network configuration." -msgstr "" -"Compute インスタンスをカスタマイズするために使用されるファイル。SSH 鍵や特定" -"のネットワーク設定を注入するために使用できます。" - -msgid "" -"A generic term for virtualization of network functions such as switching, " -"routing, load balancing, and security using a combination of VMs and " -"overlays on physical network infrastructure." -msgstr "" -"複数の仮想マシンを使用して、物理ネットワーク上にオーバーレイされる、スイッチ" -"ング、ルーティング、負荷分散、セキュリティーなどのネットワーク機能の仮想化に" -"関する一般的な用語。" - -msgid "" -"A group of fixed and/or floating IP addresses that are assigned to a project " -"and can be used by or assigned to the VM instances in a project." -msgstr "" -"プロジェクトに割り当てられ、プロジェクトの仮想マシンインスタンスに使用でき" -"る、 Fixed IP アドレスと Floating IP アドレスのグループ。" - -msgid "" -"A group of interrelated web development techniques used on the client-side " -"to create asynchronous web applications. Used extensively in horizon." -msgstr "" -"非同期 Web アプリケーションを作成する為にクライアント側で使用される相互関係の" -"ある Web 開発技術の集合。Horizon で広く使用されている。" - -msgid "" -"A group of related button types within horizon. Buttons to start, stop, and " -"suspend VMs are in one class. Buttons to associate and disassociate floating " -"IP addresses are in another class, and so on." -msgstr "" -"Horizon 内で関連するボタン種別のグループ。仮想マシンを起動、停止、休止するボ" -"タンは、1 つのクラスにある。Floating IP アドレスを関連付ける、関連付けを解除" -"するボタンは、別のクラスにある。" - -msgid "" -"A group of users; used to isolate access to Compute resources. An " -"alternative term for a project." -msgstr "" -"ユーザーのグループ。Compute リソースへのアクセスを分離するために使用される。" -"プロジェクトの別名。" - -msgid "" -"A grouped release of projects related to OpenStack that came out in April " -"2012, the fifth release of OpenStack. It included Compute (nova 2012.1), " -"Object Storage (swift 1.4.8), Image (glance), Identity (keystone), and " -"Dashboard (horizon). Essex is the code name for the fifth release of " -"OpenStack. The design summit took place in Boston, Massachusetts, US and " -"Essex is a nearby city." -msgstr "" -"2012年4月に登場した OpenStack 関連プロジェクトのリリース。Compute (nova " -"2012.1), Object Storage (swift 1.4.8), Image (glance), Identity (keystone), " -"Dashboard (horizon) が含まれる。Essex は、OpenStack の 5 番目のリリースのコー" -"ド名。デザインサミットは、アメリカ合衆国マサチューセッツ州ボストンで開催され" -"た。Essex はその近郊都市。" - -msgid "" -"A grouped release of projects related to OpenStack that came out in February " -"of 2011. It included only Compute (nova) and Object Storage (swift). Bexar " -"is the code name for the second release of OpenStack. The design summit took " -"place in San Antonio, Texas, US, which is the county seat for Bexar county." -msgstr "" -"2011 年 2 月に登場した OpenStack 関連プロジェクトのリリース。Compute (nova) " -"と Object Storage (swift) が含まれる。Bexar は OpenStack の 2 番目のコード" -"名。デザインサミットは、アメリカ合衆国テキサス州サンアントニオで開催された。" -"ベア郡の郡庁所在地。" - -msgid "" -"A grouped release of projects related to OpenStack that came out in the fall " -"of 2011, the fourth release of OpenStack. It included Compute (nova 2011.3), " -"Object Storage (swift 1.4.3), and the Image service (glance). Diablo is the " -"code name for the fourth release of OpenStack. The design summit took place " -"in the Bay Area near Santa Clara, California, US and Diablo is a nearby city." 
-msgstr "" -"2011年秋に登場した OpenStack 関連プロジェクトのリリース。Compute (nova " -"2011.3)、Object Storage (swift 1.4.3)、Image service (glance) が含まれる。" -"Diablo は、OpenStack の 4 番目のリリースのコード名。デザインサミットは、アメ" -"リカ合衆国カリフォルニア州サンタクララ近くにある海岸エリアで開催された。" -"Diablo はその近郊都市。" - -msgid "" -"A grouped release of projects related to OpenStack that came out in the fall " -"of 2012, the sixth release of OpenStack. It includes Compute (nova), Object " -"Storage (swift), Identity (keystone), Networking (neutron), Image service " -"(glance), and Volumes or Block Storage (cinder). Folsom is the code name for " -"the sixth release of OpenStack. The design summit took place in San " -"Francisco, California, US and Folsom is a nearby city." -msgstr "" -"2012年秋に登場した OpenStack 関連プロジェクトのリリース。Compute (nova), " -"Object Storage (swift), Identity (keystone), Networking (neutron), Image " -"service (glance)、Volumes 又は Block Storage (cinder) が含まれる。Folsom は、" -"OpenStack の 6 番目のリリースのコード名。デザインサミットは、アメリカ合衆国カ" -"リフォルニア州サンフランシスコで開催された。Folsom は、その近郊都市。" - -msgid "" -"A high availability system design approach and associated service " -"implementation ensures that a prearranged level of operational performance " -"will be met during a contractual measurement period. High availability " -"systems seeks to minimize system downtime and data loss." -msgstr "" -"高可用性システムの設計手法および関連サービスの実装により、契約された計測期間" -"中、合意された運用レベルを満たします。高可用性システムは、システムの停止時間" -"とデータ損失を最小化しようとします。" - -msgid "" -"A horizon component that stores and tracks client session information. " -"Implemented through the Django sessions framework." -msgstr "" -"クライアントセッションの保持と追跡を行う Horizon のコンポーネント。 Django の" -"セッションフレームワークを用いて実装されている。" - -msgid "" -"A hybrid cloud is a composition of two or more clouds (private, community or " -"public) that remain distinct entities but are bound together, offering the " -"benefits of multiple deployment models. Hybrid cloud can also mean the " -"ability to connect colocation, managed and/or dedicated services with cloud " -"resources." -msgstr "" -"ハイブリッドクラウドは、複数のクラウド (プライベート、コミュニティー、パブ" -"リック) の組み合わせ。別々のエンティティーのままですが、一緒にまとめられる。" -"複数の配備モデルの利点を提供する。ハイブリッドクラウドは、コロケーション、マ" -"ネージドサービス、専用サービスをクラウドのリソースに接続する機能を意味するこ" -"ともある。" - -msgid "" -"A kind of web service API that uses REST, or Representational State " -"Transfer. REST is the style of architecture for hypermedia systems that is " -"used for the World Wide Web." -msgstr "" -"REST を使用する Web サービス API の 1 種。REST は、WWW 向けに使用される、ハイ" -"パーメディアシステム向けのアーキテクチャーの形式である。" - -msgid "" -"A lightweight SQL database, used as the default persistent storage method in " -"many OpenStack services." -msgstr "" -"軽量 SQL データベース。多くの OpenStack サービスでデフォルトの永続ストレージ" -"として使用されている。" - -msgid "" -"A list of API endpoints that are available to a user after authentication " -"with the Identity service." -msgstr "Identity による認証後、ユーザーが利用可能な API エンドポイントの一覧。" - -msgid "" -"A list of URL and port number endpoints that indicate where a service, such " -"as Object Storage, Compute, Identity, and so on, can be accessed." -msgstr "" -"URL やポート番号のエンドポイントの一覧。Object Storage、Compute、Identity な" -"どのサービスがアクセスできる場所を意味する。" - -msgid "A list of VM images that are available through Image service." -msgstr "Image service 経由で利用可能な仮想マシンイメージの一覧。" - -msgid "" -"A list of permissions attached to an object. An ACL specifies which users or " -"system processes have access to objects. It also defines which operations " -"can be performed on specified objects. Each entry in a typical ACL specifies " -"a subject and an operation. 
For instance, the ACL entry ``(Alice, delete)`` " -"for a file gives Alice permission to delete the file." -msgstr "" -"オブジェクトに対するアクセス許可の一覧。オブジェクトにアクセスできるユーザー" -"やシステムプロセスを指定する。また、特定のオブジェクトに対してどのような操作" -"が行えるかを定義する。通常のアクセス制御リスト (ACL) の項目では対象項目と操作" -"を指定する。例えば、あるファイルに対して ``(Alice, delete)`` という ACL 項目" -"が定義されると、 Alice にファイルを削除する許可が付与される。" - -msgid "" -"A list of tenants that can access a given VM image within Image service." -msgstr "" -"Image service 内で指定した仮想マシンイメージにアクセスできるテナントの一覧。" - -msgid "" -"A load balancer is a logical device that belongs to a cloud account. It is " -"used to distribute workloads between multiple back-end systems or services, " -"based on the criteria defined as part of its configuration." -msgstr "" -"負荷分散装置は、クラウドアカウントに属する論理デバイスである。その設定に定義" -"されている基準に基づき、複数のバックエンドのシステムやサービス間でワークロー" -"ドを分散するために使用される。" - -msgid "" -"A logical set of devices, such as web servers, that you group together to " -"receive and process traffic. The load balancing function chooses which " -"member of the pool handles the new requests or connections received on the " -"VIP address. Each VIP has one pool." -msgstr "" -"Web サーバーなどのデバイスの論理的な集合。一緒にトラフィックを受け、処理する" -"ために、グループ化する。負荷分散機能は、プール内のどのメンバーが仮想 IP アド" -"レスで受信した新規リクエストや接続を処理するかを選択します。各仮想 IP は 1 つ" -"のプールを持ちます。" - -msgid "" -"A mechanism that allows IPv6 packets to be transmitted over an IPv4 network, " -"providing a strategy for migrating to IPv6." -msgstr "" -"IPv6 パケットを IPv4 ネットワーク経由で送信するための機構。IPv6 に移行する手" -"段を提供する。" - -msgid "" -"A mechanism that allows many resources (for example, fonts, JavaScript) on a " -"web page to be requested from another domain outside the domain from which " -"the resource originated. In particular, JavaScript's AJAX calls can use the " -"XMLHttpRequest mechanism." -msgstr "" -"Web ページのさまざまなリソース (例: フォント、JavaScript) を、リソースのある" -"ドメインの外部から要求できるようになる機能。とくに、JavaScript の AJAX コール" -"が XMLHttpRequest 機能を使用できる。" - -msgid "" -"A message that is stored both in memory and on disk. The message is not lost " -"after a failure or restart." -msgstr "" -"メモリーとディスクの両方に保存されているメッセージ。メッセージは、故障や再起" -"動した後も失われません。" - -msgid "" -"A message that is stored in memory and is lost after the server is restarted." -msgstr "メモリーに保存され、サービスの再起動後に失われるメッセージ。" - -msgid "" -"A method for making file systems available over the network. Supported by " -"OpenStack." -msgstr "" -"ネットワーク経由でファイルシステムを利用可能にある方式。OpenStack によりサ" -"ポートされる。" - -msgid "" -"A method of VM live migration used by KVM to evacuate instances from one " -"host to another with very little downtime during a user-initiated " -"switchover. Does not require shared storage. Supported by Compute." -msgstr "" -"ユーザー操作によりあるホストから別のホストに切り替え中、わずかな停止時間でイ" -"ンスタンスを退避するために、KVM により使用される仮想マシンのライブマイグレー" -"ションの方法。共有ストレージ不要。Compute によりサポートされる。" - -msgid "" -"A method of operating system installation where a finalized disk image is " -"created and then used by all nodes without modification." -msgstr "" -"最終的なディスクイメージが作成され、すべてのノードで変更することなく使用され" -"る、オペレーティングシステムのインストール方法。" - -msgid "" -"A method to automatically configure networking for a host at boot time. " -"Provided by both Networking and Compute." -msgstr "" -"ホストの起動時にネットワークを自動的に設定する方式。Networking と Compute に" -"より提供される。" - -msgid "" -"A method to establish trusts between identity providers and the OpenStack " -"cloud." -msgstr "認証プロバイダーと OpenStack クラウド間で信頼を確立する方法。" - -msgid "" -"A method to further subdivide availability zones into hypervisor pools, a " -"collection of common hosts." 
-msgstr "" -"アベイラビリティーゾーンをさらに小さいハイパーバイザープールに分割するための" -"方法。一般的なホスト群。" - -msgid "" -"A minimal Linux distribution designed for use as a test image on clouds such " -"as OpenStack." -msgstr "" -"OpenStack などのクラウドでテストイメージとして使用するために設計された最小の " -"Linux ディストリビューション。" - -msgid "" -"A model that enables access to a shared pool of configurable computing " -"resources, such as networks, servers, storage, applications, and services, " -"that can be rapidly provisioned and released with minimal management effort " -"or service provider interaction." -msgstr "" -"ネットワーク、サーバー、ストレージ、アプリケーション、サービスなどの設定可能" -"なコンピューティングリソースの共有プールにアクセスできるモデル。最小限の管理" -"作業やサービスプロバイダーとのやりとりで、迅速に配備できてリリースできる。" - -msgid "" -"A network authentication protocol which works on the basis of tickets. " -"Kerberos allows nodes communication over a non-secure network, and allows " -"nodes to prove their identity to one another in a secure manner." -msgstr "" -"チケットベースで機能するネットワーク認証プロトコル。 Kerberos により、安全で" -"ないネットワークを通したノード通信ができ、ノードは安全な方法で互いに本人確認" -"ができるようになります。" - -msgid "" -"A network protocol used by a network client to obtain an IP address from a " -"configuration server. Provided in Compute through the dnsmasq daemon when " -"using either the FlatDHCP manager or VLAN manager network manager." -msgstr "" -"管理サーバーから IP アドレスを取得するために、ネットワーククライアントにより" -"使用されるネットワークプロトコル。FlatDHCP マネージャーや VLAN マネージャー使" -"用時、dnsmasq デーモン経由で Compute で提供される。" - -msgid "A network segment typically used for instance Internet access." -msgstr "" -"一般的にインスタンスのインターネットアクセスに使用されるネットワークセグメン" -"ト。" - -msgid "" -"A network segment used for administration, not accessible to the public " -"Internet." -msgstr "" -"管理のために使用されるネットワークセグメント。パブリックなインターネットから" -"アクセスできない。" - -msgid "" -"A network segment used for instance traffic tunnels between compute nodes " -"and the network node." -msgstr "" -"コンピュートノードとネットワークノード間で、インスタンスのトラフィックをトン" -"ネルするために使用されるネットワークセグメント。" - -msgid "" -"A network virtualization technology that attempts to reduce the scalability " -"problems associated with large cloud computing deployments. It uses a VLAN-" -"like encapsulation technique to encapsulate Ethernet frames within UDP " -"packets." -msgstr "" -"大規模なクラウドコンピューティング環境に関連するスケーラビリティー問題を削減" -"するためのネットワーク仮想化技術。VLAN のようなカプセル化技術を使用して、" -"Ethernet フレームを UDP パケット内にカプセル化する。" - -msgid "A node that provides the Object Storage proxy service." -msgstr "Object Storage プロキシサービスを提供するノード。" - -msgid "" -"A node that runs Object Storage account, container, and object services." -msgstr "" -"Object Storage のアカウントサービス、コンテナーサービス、オブジェクトサービス" -"を実行するノード。" - -msgid "" -"A node that runs network, volume, API, scheduler, and image services. Each " -"service may be broken out into separate nodes for scalability or " -"availability." -msgstr "" -"ネットワーク、ボリューム、API、スケジューラー、イメージサービスなどを実行する" -"ノード。各サービスは、スケーラビリティーや可用性のために、別々のノードに分割" -"することもできます。" - -msgid "" -"A node that runs the nova-compute daemon that manages VM instances that " -"provide a wide range of services, such as web applications and analytics." -msgstr "" -"nova-compute デーモン、Web アプリケーションや分析のような幅広いサービスを提供" -"する仮想マシンインスタンスを実行するノード。" - -msgid "A notification agent (``ceilometer-agent-notification``)" -msgstr "通知エージェント (``ceilometer-agent-notification``)" - -msgid "" -"A notification driver that monitors VM instances and updates the capacity " -"cache as needed." 
-msgstr "" -"VM インスタンスを監視し、必要に応じて容量キャッシュを更新する通知ドライバ。" - -msgid "A notification listener (``aodh-listener``)" -msgstr "通知リスナー (``aodh-listener``)" - -msgid "" -"A number of periodic processes run on the OpenStack Image service to support " -"caching. Replication services ensure consistency and availability through " -"the cluster. Other periodic processes include auditors, updaters, and " -"reapers." -msgstr "" -"キャッシュをサポートするために、 OpenStack Image service 上では多くの定期的な" -"プロセスが実行されます。複製サービスにより、クラスター全体での一貫性と可用性" -"が確保されます。他の定期的なプロセスには auditor、 updater, reaper がありま" -"す。" - -msgid "" -"A number within a database that is incremented each time a change is made. " -"Used by Object Storage when replicating." -msgstr "" -"変更が行われる度に増加するデータベース内の数値。Object Storage が複製を行う際" -"に使用する。" - -msgid "" -"A package commonly installed in VM images that performs initialization of an " -"instance after boot using information that it retrieves from the metadata " -"service, such as the SSH public key and user data." -msgstr "" -"メタデータサービスから取得した、SSH 公開鍵やユーザーデータなどの情報を使用し" -"て、インスタンスの起動後に初期化を実行する、一般的に仮想マシンイメージにイン" -"ストールされるパッケージ。" - -msgid "" -"A packaged version available in the Open Build Service (`https://build." -"opensuse.org/package/show? package=python-pip&project=Cloud:OpenStack:Master " -"`__) enables you to use YaST or zypper to install the " -"package." -msgstr "" -"Open Build Service (`https://build.opensuse.org/package/show?package=python-" -"pip&project=Cloud:OpenStack:Master `__) にあるパッケージを利用" -"できます。または、パッケージをインストールするために YaST や zypper を使用で" -"きます。" - -msgid "A packaged version enables you to use yum to install the package:" -msgstr "yum を使用してインストールできるパッケージがあります。" - -msgid "A persistent storage method supported by XenAPI, such as iSCSI or NFS." -msgstr "iSCSI や NFS など、XenAPI によりサポートされる永続ストレージ方式。" - -msgid "A person who plans, designs, and oversees the creation of clouds." -msgstr "クラウドの作成を計画、設計および監督する人。" - -msgid "" -"A personality that a user assumes to perform a specific set of operations. A " -"role includes a set of rights and privileges. A user assuming that role " -"inherits those rights and privileges." -msgstr "" -"ユーザーが特定の操作の組を実行すると仮定する人格。ロールは一組の権利と権限を" -"含みます。そのロールを仮定しているユーザーは、それらの権利と権限を継承しま" -"す。" - -msgid "A physical computer, not a VM instance (node)." -msgstr "物理コンピューター。仮想マシンインスタンス (ノード) ではない。" - -msgid "" -"A physical or virtual device that provides connectivity to another device or " -"medium." -msgstr "他のデバイスやメディアに接続する物理デバイスまたは仮想デバイス。" - -msgid "" -"A physical or virtual network device that passes network traffic between " -"different networks." -msgstr "" -"異なるネットワーク間でネットワーク通信を転送する、物理または仮想のネットワー" -"クデバイス。" - -msgid "" -"A piece of software that makes available another piece of software over a " -"network." -msgstr "" -"他のソフトウェア部品をネットワーク経由で利用可能にするソフトウェア部品。" - -msgid "" -"A platform that provides a suite of desktop environments that users access " -"to receive a desktop experience from any location. This may provide general " -"use, development, or even homogeneous testing environments." -msgstr "" -"デスクトップ環境群を提供するプラットフォーム。ユーザーがどこからでもデスク" -"トップを利用するためにアクセスする可能性がある。一般的な使用、開発、同種のテ" -"スト環境さえも提供できる。" - -msgid "A plug-in for the OpenStack dashboard (horizon)." -msgstr "OpenStack dashboard (horizon) のプラグイン。" - -msgid "" -"A point-in-time copy of an OpenStack storage volume or image. Use storage " -"volume snapshots to back up volumes. Use image snapshots to back up data, or " -"as \"gold\" images for additional servers." 
-msgstr "" -"OpenStack ストレージボリュームやイメージの、ある時点でのコピー。ストレージの" -"ボリュームスナップショットは、ボリュームをバックアップするために使用する。イ" -"メージスナップショットは、データのバックアップを行ったり、新しいサーバー用の" -"「ゴールド」イメージ(設定済みイメージ)としてバックアップしたりするのに使用" -"する。" - -msgid "" -"A pre-made VM image that serves as a cloudpipe server. Essentially, OpenVPN " -"running on Linux." -msgstr "" -"cloudpipe サーバとしてサービスを行う為の、予め用意された VM イメージ。本質的" -"には Linux 上で実行される OpenVPN。" - -msgid "" -"A process that is created when a RPC call is executed; used to push the " -"message to the topic exchange." -msgstr "" -"RPC コールが実行されるときに作成されるプロセス。メッセージをトピック交換者に" -"プッシュするために使用される。" - -msgid "" -"A process that runs in the background and waits for requests. May or may not " -"listen on a TCP or UDP port. Do not confuse with a worker." -msgstr "" -"バックグラウンドで動作し、リクエストを待機するプロセス。TCP ポートや UDP ポー" -"トをリッスンする可能性がある。ワーカーとは異なる。" - -msgid "" -"A program that keeps the Image service VM image cache at or below its " -"configured maximum size." -msgstr "" -"Image service の仮想マシンイメージキャッシュを設定した最大値以下に保つプログ" -"ラム。" - -msgid "" -"A programming language that is used to create systems that involve more than " -"one computer by way of a network." -msgstr "" -"ネットワーク経由で複数のコンピューターが関連するシステムを作成するために使用" -"されるプログラミング言語。" - -msgid "" -"A project that is not officially endorsed by the OpenStack Foundation. If " -"the project is successful enough, it might be elevated to an incubated " -"project and then to a core project, or it might be merged with the main code " -"trunk." -msgstr "" -"OpenStack Foundation で公認されていないプロジェクト。プロジェクトが充分成功し" -"た場合、育成プロジェクトに昇格し、その後コアプロジェクトに昇格する事がある。" -"あるいはメインの code trunk にマージされる事もある。" - -msgid "" -"A project that ports the shell script-based project named DevStack to Python." -msgstr "" -"DevStack という名前のシェルスクリプトベースのプロジェクトを Python に移植する" -"プロジェクト。" - -msgid "A recommended architecture for an OpenStack cloud." -msgstr "OpenStack クラウドの推奨アーキテクチャー。" - -msgid "" -"A record that specifies information about a particular domain and belongs to " -"the domain." -msgstr "特定のドメインに関する情報を指定し、ドメインに所属するレコード。" - -msgid "" -"A remote, mountable file system in the context of the Shared File Systems. " -"You can mount a share to, and access a share from, several hosts by several " -"users at a time." -msgstr "" -"Shared File System サービスにおいて、リモートのマウント可能なファイルシステム" -"のこと。同時に、複数のユーザーが複数のホストから、共有をマウントしたり、アク" -"セスしたりできる。" - -msgid "A routing algorithm in the Compute RabbitMQ." -msgstr "Compute RabbitMQ におけるルーティングアルゴリズム。" - -msgid "" -"A routing table that is created within the Compute RabbitMQ during RPC " -"calls; one is created for each RPC call that is invoked." -msgstr "" -"RPC コール中に Compute RabbitMQ 内で作成されるルーティングテーブル。関連する" -"各 RPC コールに対して作成されるもの。" - -msgid "" -"A running VM, or a VM in a known state such as suspended, that can be used " -"like a hardware server." -msgstr "" -"実行中の仮想マシン。または、一時停止などの既知の状態にある仮想マシン。ハード" -"ウェアサーバーのように使用できる。" - -msgid "" -"A scheduling method used by Compute that randomly chooses an available host " -"from the pool." -msgstr "" -"利用可能なホストをプールからランダムに選択する、Compute により使用されるスケ" -"ジューリング方式。" - -msgid "A scripting language that is used to build web pages." -msgstr "Web ページを構築するために使用されるスクリプト言語。" - -msgid "" -"A security model that focuses on data confidentiality and controlled access " -"to classified information. This model divide the entities into subjects and " -"objects. The clearance of a subject is compared to the classification of the " -"object to determine if the subject is authorized for the specific access " -"mode. 
The clearance or classification scheme is expressed in terms of a " -"lattice." -msgstr "" -"データの機密性、および区分けした情報へのアクセスの制御に注力したセキュリ" -"ティーモデル。このモデルは、エンティティーをサブジェクト (主体) とオブジェク" -"ト (対象) に分ける。サブジェクトが特定のアクセスモードを許可されるかどうかを" -"判断するために、サブジェクトの権限がオブジェクトの区分と比較される。権限や区" -"分のスキーマは、格子モデルで表現される。" - -msgid "" -"A server daemon that serves the Nova Cert service for X509 certificates. " -"Used to generate certificates for ``euca-bundle-image``. Only needed for the " -"EC2 API." -msgstr "" -"X509 証明書用の Nova Cert サービスを提供するサーバーデーモン。 ``euca-bundle-" -"image`` 用の証明書を生成するのに使用されます。 EC2 API を使用する場合にのみ必" -"要です。" - -msgid "" -"A set of OpenStack resources created and managed by the Orchestration " -"service according to a given template (either an AWS CloudFormation template " -"or a Heat Orchestration Template (HOT))." -msgstr "" -"指定されたテンプレート (AWS CloudFormation テンプレートまたは Heat " -"Orchestration Template (HOT)) に基づいて、Orchestration により作成、管理され" -"る OpenStack リソース群。" - -msgid "" -"A set of network traffic filtering rules that are applied to a Compute " -"instance." -msgstr "" -"Compute のインスタンスに適用される、ネットワーク通信のフィルタリングルールの" -"集合。" - -msgid "" -"A set of segment objects that Object Storage combines and sends to the " -"client." -msgstr "" -"Object Storage が結合し、クライアントに送信する、オブジェクトの断片の塊。" - -msgid "" -"A simple certificate authority provided by Compute for cloudpipe VPNs and VM " -"image decryption." -msgstr "" -"cloudpipe VPN と仮想マシンイメージの復号のために、Compute により提供される簡" -"単な認証局。" - -msgid "" -"A special Object Storage object that contains the manifest for a large " -"object." -msgstr "" -"大きなオブジェクト向けのマニフェストを含む、特別な Object Storage のオブジェ" -"クト。" - -msgid "" -"A special type of VM image that is booted when an instance is placed into " -"rescue mode. Allows an administrator to mount the file systems for an " -"instance to correct the problem." -msgstr "" -"インスタンスがレスキューモード時に起動する、特別な種類の仮想マシンイメージ。" -"管理者が問題を修正するために、インスタンスのファイルシステムをマウントでき" -"る。" - -msgid "" -"A specification that, when implemented by a physical PCIe device, enables it " -"to appear as multiple separate PCIe devices. This enables multiple " -"virtualized guests to share direct access to the physical device, offering " -"improved performance over an equivalent virtual device. Currently supported " -"in OpenStack Havana and later releases." -msgstr "" -"物理 PCIe デバイスにより実装されるとき、複数の別々の PCIe デバイスとして見え" -"るようにできる仕様。これにより、複数の仮想化ゲストが物理デバイスへの直接アク" -"セスを共有できるようになる。同等の仮想デバイス経由より性能を改善できる。" - -msgid "" -"A standardized interface for managing compute, data, and network resources, " -"currently unsupported in OpenStack." -msgstr "" -"コンピュート、データ、ネットワークのリソースを管理するための標準的なインター" -"フェース。現在 OpenStack でサポートされない。" - -msgid "" -"A string of text provided to the client after authentication. Must be " -"provided by the user or process in subsequent requests to the API endpoint." -msgstr "" -"認証後にクライアントに提供されるテキスト文字列。API エンドポイントに続くリク" -"エストにおいて、ユーザーまたはプロセスにより提供される必要がある。" - -msgid "" -"A subset of API calls that are accessible to authorized administrators and " -"are generally not accessible to end users or the public Internet. They can " -"exist as a separate service (keystone) or can be a subset of another API " -"(nova)." -msgstr "" -"認可された管理者がアクセスでき、一般的にエンドユーザーとパブリックなインター" -"ネットがアクセスできない、API コールのサブセット。専用のサービス (keystone) " -"が存在し、他の API (nova) のサブセットになる可能性がある。" - -msgid "" -"A system by which Internet domain name-to-address and address-to-name " -"resolutions are determined. DNS helps navigate the Internet by translating " -"the IP address into an address that is easier to remember. 
For example, " -"translating 111.111.111.1 into www.yahoo.com. All domains and their " -"components, such as mail servers, utilize DNS to resolve to the appropriate " -"locations. DNS servers are usually set up in a master-slave relationship " -"such that failure of the master invokes the slave. DNS servers might also be " -"clustered or replicated such that changes made to one DNS server are " -"automatically propagated to other active servers. In Compute, the support " -"that enables associating DNS entries with floating IP addresses, nodes, or " -"cells so that hostnames are consistent across reboots." -msgstr "" -"インターネットのドメイン名からアドレス、アドレスからドメイン名に名前解決する" -"システム。DNS は、IP アドレスを人間が覚えやすいアドレスに変換することにより、" -"インターネットを参照しやすくする。例えば、111.111.111.1 を www.yahoo.com に変" -"換する。すべてのドメイン、メールサーバーなどのコンポーネントは、DNS を利用し" -"て、適切な場所を解決する。DNS サーバーは、マスターの障害がスレーブにより助け" -"られるよう、一般的にマスターとスレーブの関係で構築する。DNS サーバーは、ある " -"DNS サーバーへの変更が他の動作中のサーバーに自動的に反映されるよう、クラス" -"ター化やレプリケーションされることもある。 Compute では、 Floating IP アドレ" -"ス、ノード、セルを DNS エントリーに関連付けることができ、リブートの前後でホス" -"ト名が変わらないようにできます。" - -msgid "" -"A system that provides services to other system entities. In case of " -"federated identity, OpenStack Identity is the service provider." -msgstr "" -"サービスを他のシステムエンティティーに提供するシステム。連合認証の場合、" -"OpenStack Identity がサービスプロバイダーとなる。" - -msgid "" -"A tool to automate system configuration and installation on Debian-based " -"Linux distributions." -msgstr "" -"Debian 系の Linux ディストリビューションでシステム設定やインストールを自動化" -"するツール。" - -msgid "" -"A tool to automate system configuration and installation on Red Hat, Fedora, " -"and CentOS-based Linux distributions." -msgstr "" -"Red Hat、Fedora、CentOS 系の Linux ディストリビューションにおいて、システム設" -"定とインストールを自動化するためのツール。" - -msgid "A type of VM image that exists as a single, bootable file." -msgstr "単独の、ブート可能なファイルとして存在する仮想マシンイメージの形式。" - -msgid "" -"A type of image file that is commonly used for animated images on web pages." -msgstr "Web ページのアニメーション画像によく使用される画像ファイルの形式。" - -msgid "" -"A type of reboot where a physical or virtual power button is pressed as " -"opposed to a graceful, proper shutdown of the operating system." -msgstr "" -"きちんとした正常なOSのシャットダウンを行わず、物理又は仮想電源ボタンを押すタ" -"イプの再起動。" - -msgid "A unique ID given to each replica of an Object Storage database." -msgstr "Object Storage データベースの各レプリカに与えられる一意な ID。" - -msgid "" -"A unit of storage within Object Storage used to store objects. It exists on " -"top of devices and is replicated for fault tolerance." -msgstr "" -"オブジェクトを保存するために使用される、Object Storage 内の保存単位。デバイス" -"の上位に存在し、耐障害のために複製される。" - -msgid "" -"A user specifies their username and password credentials to interact with " -"OpenStack, using any client command. These credentials can be specified " -"using various mechanisms, namely, the environment variable or command-line " -"argument. It is not safe to specify the password using either of these " -"methods." -msgstr "" -"ユーザーは、何らかのクライアントコマンドを使用して、ユーザー名とパスワードを" -"指定して、OpenStack を使用します。これらのクレデンシャルは、環境変数やコマン" -"ドライン引数など、さまざまな方法により指定できます。これらの方法を用いて、パ" -"スワードを指定することは安全ではありません。" - -msgid "" -"A user-created Python module that is loaded by horizon to change the look " -"and feel of the dashboard." -msgstr "" -"ダッシュボードのルックアンドフィールを変更する為に Horizon がロードする、ユー" -"ザが作成した Python モジュール。" - -msgid "" -"A virtual network port within Networking; VIFs / vNICs are connected to a " -"port." -msgstr "" -"Networking 内の仮想ネットワークポート。仮想インターフェースや仮想 NIC は、" -"ポートに接続されます。" - -msgid "" -"A virtual network that provides connectivity between entities. 
For example, " -"a collection of virtual ports that share network connectivity. In Networking " -"terminology, a network is always a layer-2 network." -msgstr "" -"エンティティ間の接続性を提供する仮想ネットワーク。例えば、ネットワーク接続性" -"を共有する仮想ポート群。Networking の用語では、ネットワークは必ず L2 ネット" -"ワークを意味する。" - -msgid "" -"A volume is a detachable block storage device, similar to a USB hard drive. " -"You can attach a volume to only one instance. To create and manage volumes, " -"you use a combination of ``nova`` and ``cinder`` client commands." -msgstr "" -"ボリュームは、USB ハードディスクのように、着脱可能なブロックストレージです。" -"ボリュームは、インスタンス 1 つだけに接続できます。``nova`` コマンドと " -"``cinder`` コマンドを組み合わせて、ボリュームを作成して管理します。" - -msgid "A web framework used extensively in horizon." -msgstr "Horizon 中で広く使用される Web フレームワーク。" - -msgid "" -"A worker daemon that creates and terminates virtual machine instances " -"through hypervisor APIs. For example:" -msgstr "" -"ハイパーバイザー API を使用して仮想マシンインスタンスの作成、終了を行うワー" -"カーデーモン。例えば、以下のようなハイパーバイザー API に対応しています。" - -msgid "" -"A worker process that verifies the integrity of Object Storage objects, " -"containers, and accounts. Auditors is the collective term for the Object " -"Storage account auditor, container auditor, and object auditor." -msgstr "" -"Object Storage のオブジェクト、コンテナー、アカウントの完全性を検証するワー" -"カープロセス。auditor は、Object Storage アカウント auditor、コンテナー " -"auditor、オブジェクト auditor の総称。" - -msgid "" -"A wrapper used by the Image service that contains a VM image and its " -"associated metadata, such as machine state, OS disk size, and so on." -msgstr "" -"仮想マシンイメージ、および、マシンの状態や OS ディスク容量などの関連メタデー" -"タを含む、Image service により使用されるラッパー。" - -msgid "ACL" -msgstr "ACL" - -msgid "API endpoint" -msgstr "API エンドポイント" - -msgid "API extension" -msgstr "API 拡張" - -msgid "API extension plug-in" -msgstr "API 拡張プラグイン" - -msgid "API key" -msgstr "API キー" - -msgid "API server" -msgstr "API サーバー" - -msgid "API token" -msgstr "API トークン" - -msgid "" -"API used to access OpenStack Networking. Provides an extensible architecture " -"to enable custom plug-in creation." -msgstr "" -"OpenStack Networking にアクセスするために利用する API。独自プラグインを作成で" -"きる拡張性を持ったアーキテクチャーになっている。" - -msgid "API used to access OpenStack Object Storage." -msgstr "OpenStack Object Storage にアクセスするために使用する API。" - -msgid "API version" -msgstr "API バージョン" - -msgid "ATA over Ethernet (AoE)" -msgstr "ATA over Ethernet (AoE)" - -msgid "AWS" -msgstr "AWS" - -msgid "" -"AWS CloudFormation allows AWS users to create and manage a collection of " -"related resources. The Orchestration service supports a CloudFormation-" -"compatible format (CFN)." -msgstr "" -"AWS CloudFormation により、AWS ユーザーは関連するリソース群を作成し、管理でき" -"るようになる。オーケストレーションサービスは CloudFormation 互換形式 (CFN) を" -"サポートする。" - -msgid "AWS CloudFormation template" -msgstr "AWS CloudFormation テンプレート" - -msgid "" -"Absolute limit on the amount of network traffic a Compute VM instance can " -"send and receive." -msgstr "" -"Compute の仮想マシンインスタンスが送受信できるネットワーク通信量の絶対制限。" - -msgid "Accept a volume transfer request" -msgstr "ボリュームの譲渡要求の受理" - -msgid "Accept the request:" -msgstr "要求を確定します。" - -msgid "" -"Accepts API requests, and routes them to the ``cinder-volume`` for action." -msgstr "" -"API リクエストを受け付け、それらを処理するために ``cinder-volume`` に中継しま" -"す。" - -msgid "Accepts Image API calls for image discovery, retrieval, and storage." -msgstr "イメージの検索、取得、保存の Image API を受け付けます。" - -msgid "" -"Accepts OpenStack Object Storage API and raw HTTP requests to upload files, " -"modify metadata, and create containers. It also serves file or container " -"listings to web browsers. 
To improve performance, the proxy server can use " -"an optional cache that is usually deployed with memcache." -msgstr "" -"OpenStack Object Storage API と生の HTTP リクエストを受け付け、ファイルのアッ" -"プロード、メタデータの変更、コンテナーの作成などを行います。ウェブブラウザー" -"に対するファイルやコンテナーの一覧表示も行えます。性能を向上させるために、プ" -"ロキシーサーバーがキャッシュを使うこともできます。通常はキャッシュには " -"memcache が使用されます。" - -msgid "" -"Accepts and responds to end user compute API calls. The service supports the " -"OpenStack Compute API, the Amazon EC2 API, and a special Admin API for " -"privileged users to perform administrative actions. It enforces some " -"policies and initiates most orchestration activities, such as running an " -"instance." -msgstr "" -"エンドユーザーからの compute API 呼び出しを受け取り応答します。このサービス" -"は OpenStack Compute API、Amazon EC2 API 、および、特権ユーザーが管理用操作を" -"実行するための特別な管理 API をサポートしています。ポリシーの適用を行います。" -"インスタンス起動などのほとんどの処理がこのサービスから開始されます。" - -msgid "" -"Accepts and routes API requests to the appropriate OpenStack Networking plug-" -"in for action." -msgstr "" -"API リクエストを受け付け、適切な OpenStack Networking プラグインに処理を中継" -"します。" - -msgid "Access associated with a VM" -msgstr "割り当てられた VM がアクセス" - -msgid "Access can be provided to a VM" -msgstr "VM からアクセス可能" - -msgid "" -"Access the Database service instance using typical database access commands. " -"For example, with MySQL:" -msgstr "" -"一般的なデータベースアクセスコマンドを使用して、Database サービスのインスタン" -"スにアクセスします。MySQL の例:" - -msgid "Account servers (swift-account-server)" -msgstr "アカウントサーバー (swift-account-server)" - -msgid "Active Directory" -msgstr "Active Directory" - -msgid "" -"Acts as the gatekeeper to Object Storage and is responsible for " -"authenticating the user." -msgstr "Object Storage へのゲートとして動作する。ユーザーの認証に責任を持つ。" - -msgid "" -"Add a line to include your newly created style sheet. For example, ``custom." -"css`` file:" -msgstr "" -"新しく作成したスタイルシートを含む行を追加します。``custom.css`` ファイルの" -"例::" - -msgid "Address Resolution Protocol (ARP)" -msgstr "Address Resolution Protocol (ARP)" - -msgid "Administrator configures size setting, based on flavors" -msgstr "管理者がフレーバーに基づいてサイズを設定" - -msgid "Advanced Message Queuing Protocol (AMQP)" -msgstr "Advanced Message Queuing Protocol (AMQP)" - -msgid "Advanced RISC Machine (ARM)" -msgstr "Advanced RISC Machine (ARM)" - -msgid "" -"After the volume recipient, or new owner, accepts the transfer, you can see " -"that the transfer is no longer available:" -msgstr "" -"ボリュームの転送先 (新しい所有者) が転送を確定した後は、その転送がすでに利用" -"できない状態になっていることが分かります。" - -msgid "" -"After you restart the Image service, you can use the following syntax to " -"view the image's location information:" -msgstr "" -"Image service を再起動してから、以下の構文を使用してイメージの場所情報を確認" -"します。" - -msgid "After you upload an image, you cannot change it." -msgstr "イメージをアップロードした後は、イメージを変更できません。" - -msgid "" -"All OpenStack core projects are provided under the terms of the Apache " -"License 2.0 license." -msgstr "" -"すべての OpenStack コアプロジェクトは Apache License 2.0 ライセンスの条件で提" -"供されている。" - -msgid "" -"Allows a user to set a flag on an Object Storage container so that all " -"objects within the container are versioned." -msgstr "" -"コンテナー内のすべてのオブジェクトがバージョンを付けられるように、ユーザーが " -"Object Storage のコンテナーにフラグを設定できる。" - -msgid "Alphanumeric ID assigned to each Identity service role." -msgstr "各 Identity service ロールに割り当てられる英数 ID。" - -msgid "Alternative name for the Block Storage API." -msgstr "Block Storage API の別名。" - -msgid "Alternative name for the glance image API." -msgstr "Glance イメージ API の別名。" - -msgid "Alternative term for a Networking plug-in or Networking API extension." 
-msgstr "Networking プラグインや Networking API 拡張の別名。" - -msgid "Alternative term for a RabbitMQ message exchange." -msgstr "RabbitMQ メッセージ交換の別名。" - -msgid "Alternative term for a VM image." -msgstr "VM イメージの別名。" - -msgid "Alternative term for a VM instance type." -msgstr "VM インスタンスタイプの別名。" - -msgid "Alternative term for a VM or guest." -msgstr "仮想マシンやゲストの別名。" - -msgid "Alternative term for a cloud controller node." -msgstr "クラウドコントローラーノードの別名。" - -msgid "Alternative term for a cloudpipe." -msgstr "cloudpipe の別名。" - -msgid "Alternative term for a fixed IP address." -msgstr "Fixed IP アドレスの別名。" - -msgid "Alternative term for a flavor ID." -msgstr "フレーバー ID の別名。" - -msgid "Alternative term for a non-durable exchange." -msgstr "非永続交換の別名。" - -msgid "Alternative term for a non-durable queue." -msgstr "非永続キューの別名。" - -msgid "Alternative term for a paused VM instance." -msgstr "一時停止 VM インスタンスの別名。" - -msgid "Alternative term for a virtual network." -msgstr "仮想ネットワークの別名。" - -msgid "Alternative term for a volume plug-in." -msgstr "ボリュームプラグインの別名。" - -msgid "" -"Alternative term for an API extension or plug-in. In the context of Identity " -"service, this is a call that is specific to the implementation, such as " -"adding support for OpenID." -msgstr "" -"API 拡張やプラグインの別名。Identity service では、OpenID のサポートの追加な" -"ど、特定の実装を意味する。" - -msgid "Alternative term for an API token." -msgstr "API トークンの別名。" - -msgid "Alternative term for an Amazon EC2 access key. See EC2 access key." -msgstr "Amazon EC2 アクセスキーの別名。EC2 アクセスキー参照。" - -msgid "Alternative term for an Identity service catalog." -msgstr "Identity サービスカタログの別名。" - -msgid "Alternative term for an Identity service default token." -msgstr "Identity service デフォルトトークンの別名。" - -msgid "Alternative term for an Object Storage authorization node." -msgstr "Object Storage 認可ノードの別名。" - -msgid "Alternative term for an admin API." -msgstr "管理 API(admin API)の別名。" - -msgid "Alternative term for an ephemeral volume." -msgstr "エフェメラルボリュームの別名。" - -msgid "Alternative term for an image." -msgstr "イメージの別名。" - -msgid "Alternative term for instance UUID." -msgstr "インスタンス UUID の別名。" - -msgid "Alternative term for non-durable." -msgstr "非永続の別名。" - -msgid "Alternative term for tenant." -msgstr "テナントの別名。" - -msgid "Alternative term for the Compute API." -msgstr "Compute API の別名。" - -msgid "Alternative term for the Identity service API." -msgstr "Identity service API の別名。" - -msgid "Alternative term for the Identity service catalog." -msgstr "Identity サービスカタログの別名。" - -msgid "Alternative term for the Image service image registry." -msgstr "Image service イメージレジストリの別名。" - -msgid "Alternative term for the Image service registry." -msgstr "Image service レジストリの別名。" - -msgid "Alternatively, users can set ``os_distro`` to a URL:" -msgstr "代わりに、ユーザーは ``os_distro`` を URL に設定できます。" - -msgid "" -"Alternatively, you can create the ``PROJECT-openrc.sh`` file from scratch, " -"if you cannot download the file from the dashboard." -msgstr "" -"何らかの理由によりダッシュボードからファイルをダウンロードできない場合、代わ" -"りに最初から ``PROJECT-openrc.sh`` ファイルを作成できます。" - -msgid "Amazon Kernel Image (AKI)" -msgstr "Amazon Kernel Image (AKI)" - -msgid "Amazon Machine Image (AMI)" -msgstr "Amazon Machine Image (AMI)" - -msgid "Amazon Ramdisk Image (ARI)" -msgstr "Amazon Ramdisk Image (ARI)" - -msgid "Amazon Web Services." -msgstr "Amazon Web Services。" - -msgid "" -"An API endpoint used for both service-to-service communication and end-user " -"interactions." 
-msgstr "" -"サービス間通信やエンドユーザーの操作などに使用される API エンドポイント。" - -msgid "" -"An API on a separate endpoint for attaching, detaching, and creating block " -"storage for compute VMs." -msgstr "" -"コンピュート VM 用のブロックストレージの作成、接続、接続解除を行うための API " -"で、独立したエンドポイントとして提供される。" - -msgid "An API server (``aodh-api``)" -msgstr "API サーバー (``aodh-api``)" - -msgid "An API server (``ceilometer-api``)" -msgstr "API サーバー (``ceilometer-api``)" - -msgid "An API that is accessible to tenants." -msgstr "テナントにアクセス可能な API。" - -msgid "" -"An AWS Query API that is compatible with AWS CloudFormation. It processes " -"API requests by sending them to the ``heat-engine`` over RPC." -msgstr "" -"AWS CloudFormation 互換の AWS Query API を提供します。受け取った API リスクエ" -"ストを RPC 経由で ``heat-engine`` に送信します。" - -msgid "" -"An Amazon EBS storage volume that contains a bootable VM image, currently " -"unsupported in OpenStack." -msgstr "" -"ブート可能な仮想マシンイメージを含む Amazon EBS ストレージボリューム。現在 " -"OpenStack では未サポート。" - -msgid "" -"An Amazon EC2 concept of an isolated area that is used for fault tolerance. " -"Do not confuse with an OpenStack Compute zone or cell." -msgstr "" -"耐障害性のために使用されるエリアを分離する Amazon EC2 の概念。OpenStack " -"Compute のゾーンやセルと混同しないこと。" - -msgid "" -"An IP address that a project can associate with a VM so that the instance " -"has the same public IP address each time that it boots. You create a pool of " -"floating IP addresses and assign them to instances as they are launched to " -"maintain a consistent IP address for maintaining DNS assignment." -msgstr "" -"インスタンスを起動するたびに同じパブリック IP アドレスを持てるように、プロ" -"ジェクトが仮想マシンに関連付けられる IP アドレス。DNS 割り当てを維持するため" -"に、Floating IP アドレスのプールを作成し、インスタンスが起動するたびにそれら" -"をインスタンスに割り当て、一貫した IP アドレスを維持します。" - -msgid "" -"An IP address that can be assigned to a VM instance within the shared IP " -"group. Public IP addresses can be shared across multiple servers for use in " -"various high-availability scenarios. When an IP address is shared to another " -"server, the cloud network restrictions are modified to enable each server to " -"listen to and respond on that IP address. You can optionally specify that " -"the target server network configuration be modified. Shared IP addresses can " -"be used with many standard heartbeat facilities, such as keepalive, that " -"monitor for failure and manage IP failover." -msgstr "" -"共有 IP グループ内の仮想マシンインスタンスに割り当てられる IP アドレス。パブ" -"リック IP アドレスは、さまざまな高可用性のシナリオで使用するために複数サー" -"バーにまたがり共有できる。IP アドレスが別のサーバーと共有されるとき、クラウド" -"のネットワーク制限が変更され、各サーバーがリッスンでき、その IP アドレスに応" -"答できるようになる。オプションとして、対象サーバーの変更するネットワーク設定" -"を指定できる。共有 IP アドレスは、keepalive などの多くの標準的なハートビート" -"機能と一緒に使用でき、エラーをモニターし、IP のフェイルオーバーを管理しる。" - -msgid "An IP address that is accessible to end-users." -msgstr "エンドユーザがアクセス可能な IP アドレス。" - -msgid "" -"An IP address that is associated with the same instance each time that " -"instance boots, is generally not accessible to end users or the public " -"Internet, and is used for management of the instance." -msgstr "" -"インスタンス起動時に毎回同じインスタンスに割当られるIPアドレス(一般に、エン" -"ドユーザやパブリックインターネットからはアクセス出来ない)。インスタンスの管" -"理に使用される。" - -msgid "" -"An IP address used for management and administration, not available to the " -"public Internet." -msgstr "" -"管理のために使用される IP アドレス。パブリックなインターネットから利用できま" -"せん。" - -msgid "" -"An IP address, typically assigned to a router, that passes network traffic " -"between different networks." -msgstr "" -"異なるネットワーク間でネットワーク通信を中継する、IP アドレス。一般的にはルー" -"ターに割り当てられる。" - -msgid "" -"An Identity API v3 entity. 
Represents a collection of projects, groups and " -"users that defines administrative boundaries for managing OpenStack Identity " -"entities. On the Internet, separates a website from other sites. Often, the " -"domain name has two or more parts that are separated by dots. For example, " -"yahoo.com, usa.gov, harvard.edu, or mail.yahoo.com. Also, a domain is an " -"entity or container of all DNS-related information containing one or more " -"records." -msgstr "" -"Identity API v3 のエンティティー。プロジェクト、グループ、ユーザーの集合体" -"で、 OpenStack Identity のエンティティー管理において管理境界を定義するための" -"ものである。インターネット分野では、ドメインによりウェブサイトが区別され、多" -"くの場合、ドメイン名はドット区切りの 2 以上の部分から構成される。例えば、 " -"yahoo.com, usa.gov, harvard.edu, mail.yahoo.com など。また、ドメインは、DNS " -"関連情報のエンティティーや 1 つ以上のレコードを持つ DNS 関連の情報の入れ物を" -"表すのにも使用される。" - -msgid "" -"An Identity service API access token that is associated with a specific " -"tenant." -msgstr "特定のテナントに関連付けられた Identity service API アクセストークン。" - -msgid "" -"An Identity service API endpoint that is associated with one or more tenants." -msgstr "" -"1 つ以上のテナントと関連付けられた Identity service API エンドポイント。" - -msgid "" -"An Identity service component that manages and validates tokens after a user " -"or tenant has been authenticated." -msgstr "" -"ユーザーやテナントが認証された後、トークンを管理し、検証する Identity のコン" -"ポーネント。" - -msgid "" -"An Identity service feature that enables services, such as Compute, to " -"automatically register with the catalog." -msgstr "" -"自動的にカタログに登録するために、Compute などのサービスを有効化する、" -"Identity の機能。" - -msgid "" -"An Identity service that lists API endpoints that are available to a user " -"after authentication with the Identity service." -msgstr "" -"ユーザーが Identity で認証後、利用可能な API エンドポイントを一覧表示する、" -"Identity のサービス。" - -msgid "" -"An Identity service token that is not associated with a specific tenant and " -"is exchanged for a scoped token." -msgstr "" -"特定のテナントに関連づけられていない、スコープ付きトークンのために交換され" -"る、Identity のトークン。" - -msgid "" -"An Identity v3 API entity. Represents a collection of users that is owned by " -"a specific domain." -msgstr "" -"Identity v3 API のエンティティーで、特定のドメイン内のユーザーの集合を表す。" - -msgid "An Image service VM image that is available to all tenants." -msgstr "すべてのテナントが利用できる Image service の仮想マシンイメージ。" - -msgid "An Image service VM image that is only available to specified tenants." -msgstr "指定したテナントのみで利用可能な Image service の仮想マシンイメージ。" - -msgid "" -"An Image service container format that indicates that no container exists " -"for the VM image." -msgstr "" -"仮想マシンイメージ用のコンテナーが存在しないことを意味する、Image service の" -"コンテナー形式。" - -msgid "" -"An Image service that provides VM image metadata information to clients." -msgstr "" -"クライアントに仮想マシンイメージメタデータ情報を提供する Image service。" - -msgid "" -"An Internet Protocol (IP) address configured on the load balancer for use by " -"clients connecting to a service that is load balanced. Incoming connections " -"are distributed to back-end nodes based on the configuration of the load " -"balancer." -msgstr "" -"負荷分散するサービスへのクライアント接続に使用される負荷分散装置において設定" -"される IP アドレス。受信の接続が、負荷分散の設定に基づいて、バックエンドの" -"ノードに分散される。" - -msgid "An L2 network segment within Networking." -msgstr "Networking 内の L2 ネットワークセグメント。" - -msgid "An Object Storage component that collects meters." -msgstr "計測項目を収集する Object Storage のコンポーネント。" - -msgid "" -"An Object Storage component that copies an object to remote partitions for " -"fault tolerance." 
-msgstr "" -"耐障害性のためにオブジェクトをリモートパーティションをコピーする Object " -"Storage コンポーネント。" - -msgid "" -"An Object Storage component that copies changes in the account, container, " -"and object databases to other nodes." -msgstr "" -"アカウント、コンテナー、オブジェクトデータベースを他のノードに変更点をコピー" -"する Object Storage コンポーネント。" - -msgid "An Object Storage component that is responsible for managing objects." -msgstr "オブジェクトの管理に責任を持つ Object Storage のコンポーネント。" - -msgid "" -"An Object Storage component that provides account services such as list, " -"create, modify, and audit. Do not confuse with OpenStack Identity service, " -"OpenLDAP, or similar user-account services." -msgstr "" -"一覧表示、作成、変更、監査などのアカウントサービスを提供する、Object Storage " -"のコンポーネント。OpenStack Identity、OpenLDAP、類似のユーザーアカウントサー" -"ビスなどと混同しないこと。" - -msgid "" -"An Object Storage large object that has been broken up into pieces. The re-" -"assembled object is called a concatenated object." -msgstr "" -"部品に分割された Object Storage の大きなオブジェクト。再構築されたオブジェク" -"トは、連結オブジェクトと呼ばれる。" - -msgid "" -"An Object Storage middleware component that enables creation of URLs for " -"temporary object access." -msgstr "" -"一時的なオブジェクトアクセスのために URL を作成できる Object Storage ミドル" -"ウェアコンポーネント。" - -msgid "An Object Storage node that provides authorization services." -msgstr "認可サービスを提供する Object Storage ノード。" - -msgid "" -"An Object Storage node that provides container services, account services, " -"and object services; controls the account databases, container databases, " -"and object storage." -msgstr "" -"コンテナーサービス、アカウントサービス、オブジェクトサービスを提供する " -"Object Storage のノード。アカウントデータベース、コンテナーデータベース、オブ" -"ジェクトデータベースを制御する。" - -msgid "An Object Storage server that manages containers." -msgstr "コンテナーを管理する Object Storage サーバー。" - -msgid "" -"An Object Storage worker that scans for and deletes account databases and " -"that the account server has marked for deletion." -msgstr "" -"アカウントサーバーが削除する印を付けた、アカウントデータベースをスキャンし、" -"削除する、Object Storage のワーカー。" - -msgid "" -"An OpenStack core project that provides discovery, registration, and " -"delivery services for disk and server images. The project name of the Image " -"service is glance." -msgstr "" -"ディスクやサーバーイメージ向けのサービスの検索、登録、配信を提供する " -"OpenStack コアプロジェクト。Image service のプロジェクト名は glance。" - -msgid "An OpenStack core project that provides object storage services." -msgstr "オブジェクトストレージサービスを提供する OpenStack コアプロジェクト。" - -msgid "" -"An OpenStack grouped release of projects that came out in the spring of " -"2011. It included Compute (nova), Object Storage (swift), and the Image " -"service (glance). Cactus is a city in Texas, US and is the code name for the " -"third release of OpenStack. When OpenStack releases went from three to six " -"months long, the code name of the release changed to match a geography " -"nearest the previous summit." -msgstr "" -"2011年春に登場した OpenStack 関連プロジェクトのリリース。Compute (nova)、" -"Object Storage (swift)、Image service (glance) が含まれる。Cactus は、アメリ" -"カ合衆国テキサス州の都市であり、OpenStack の 3 番目のリリースのコード名であ" -"る。OpenStack のリリース間隔が 3 か月から 6 か月になったとき、リリースのコー" -"ド名が前のサミットと地理的に近いところになるように変更された。" - -msgid "" -"An OpenStack service that provides a set of services for management of " -"shared file systems in a multi-tenant cloud environment. The service is " -"similar to how OpenStack provides block-based storage management through the " -"OpenStack Block Storage service project. With the Shared File Systems " -"service, you can create a remote file system and mount the file system on " -"your instances. 
You can also read and write data from your instances to and " -"from your file system. The project name of the Shared File Systems service " -"is manila." -msgstr "" -"マルチテナントのクラウド環境で共有ファイルシステムを管理するためのサービス群" -"を提供する OpenStack サービス。 OpenStack がブロックベースのストレージ管理" -"を、 OpenStack Block Storage サービスプロジェクトとして提供しているのと類似し" -"ている。 Shared File Systems サービスを使うと、リモートファイルシステムを作成" -"し、自分のインスタンスからそのファイルシステムをマウントし、インスタンスから" -"そのファイルシステムの読み書きを行える。このプロジェクトのコード名は manila。" - -msgid "" -"An OpenStack service, such as Compute, Object Storage, or Image service. " -"Provides one or more endpoints through which users can access resources and " -"perform operations." -msgstr "" -"Compute、Object Storage、Image service などの OpenStack のサービス。ユーザー" -"がリソースにアクセスしたり、操作を実行したりできる 1 つ以上のエンドポイントを" -"提供する。" - -msgid "" -"An OpenStack-native REST API that processes API requests by sending them to " -"the ``heat-engine`` over :term:`Remote Procedure Call (RPC)`." -msgstr "" -"OpenStack 独自の REST API を提供します。 受け取った API リクエストを、 :" -"term:`リモートプロシージャコール (RPC) <Remote Procedure Call (RPC)>` 経由" -"で ``heat-engine`` に送信します。" - -msgid "An OpenStack-provided image." -msgstr "OpenStack が提供するイメージ。" - -msgid "An OpenStack-supported hypervisor." -msgstr "OpenStack がサポートするハイパーバイザーの1つ。" - -msgid "" -"An OpenStack-supported hypervisor. KVM is a full virtualization solution for " -"Linux on x86 hardware containing virtualization extensions (Intel VT or AMD-" -"V), ARM, IBM Power, and IBM zSeries. It consists of a loadable kernel " -"module that provides the core virtualization infrastructure, and a processor-" -"specific module." -msgstr "" -"OpenStack がサポートするハイパーバイザー。KVM は、仮想化拡張 (Intel VT や " -"AMD-V) を持つ x86 ハードウェア、ARM、IBM Power、IBM zSeries 上の Linux 向けの" -"完全仮想化ソリューション。" - -msgid "An administrator who has access to all hosts and instances." -msgstr "すべてのホストやインスタンスへアクセス権を持つ管理者。" - -msgid "" -"An administrator-defined token used by Compute to communicate securely with " -"the Identity service." -msgstr "" -"Identity と安全に通信するために Compute により使用される、管理者により定義さ" -"れたトークン。" - -msgid "An alarm evaluator (``aodh-evaluator``)" -msgstr "アラーム評価器 (``aodh-evaluator``)" - -msgid "An alarm notifier (``aodh-notifier``)" -msgstr "アラーム通知器 (``aodh-notifier``)" - -msgid "" -"An alpha-numeric string of text used to access OpenStack APIs and resources." -msgstr "OpenStack API やリソースへのアクセスに使用される英数字文字列。" - -msgid "An alternative name for Networking API." -msgstr "Networking API の別名。" - -msgid "" -"An application protocol for accessing and maintaining distributed directory " -"information services over an IP network." -msgstr "" -"IP ネットワーク上の分散ディレクトリー情報サービスへのアクセスと管理を行うため" -"のアプリケーションプロトコル。" - -msgid "" -"An application protocol for distributed, collaborative, hypermedia " -"information systems. It is the foundation of data communication for the " -"World Wide Web. Hypertext is structured text that uses logical links " -"(hyperlinks) between nodes containing text. HTTP is the protocol to exchange " -"or transfer hypertext." -msgstr "" -"分散、協調、ハイパーメディア情報システム用のアプリケーションプロトコル。WWW " -"のデータ通信の基盤。ハイパーテキストは、ノード間でのテキストを含む論理リンク " -"(ハイパーリンク) を使った構造化テキストのことである。HTTP は、ハイパーテキス" -"トを交換したり転送したりするためのプロトコル。" - -msgid "" -"An application that runs on the back-end server in a load-balancing system." -msgstr "負荷分散システムのバックエンドサーバーで動作するアプリケーション。" - -msgid "" -"An authentication and authorization service for Object Storage, implemented " -"through WSGI middleware; uses Object Storage itself as the persistent " -"backing store."
-msgstr "" -"Object Storage の認証と認可のサービス。WSGI ミドルウェア経由で実装される。" -"バックエンドの永続的なデータストアとして、Object Storage 自身を使用する。" - -msgid "" -"An authentication facility within Object Storage that enables Object Storage " -"itself to perform authentication and authorization. Frequently used in " -"testing and development." -msgstr "" -"Object Storage 自身が認証と認可を実行できるようになる、Object Storage 内の認" -"証機能。テストや開発によく使用される。" - -msgid "" -"An easy method to create a local LDAP directory for testing Identity and " -"Compute. Requires Redis." -msgstr "" -"Identity と Compute のテスト目的でローカルな LDAP ディレクトリーを作成するた" -"めの簡易な方法。Redis が必要。" - -msgid "" -"An element of the Compute RabbitMQ that comes to life when a RPC call is " -"executed. It connects to a direct exchange through a unique exclusive queue, " -"sends the message, and terminates." -msgstr "" -"RPC コールが実行されるとき、開始される Compute RabbitMQ の要素。一意な排他" -"キュー経由で直接交換者に接続し、メッセージを送信し、終了します。" - -msgid "" -"An element of the Compute capacity cache that is calculated based on the " -"number of build, snapshot, migrate, and resize operations currently in " -"progress on a given host." -msgstr "" -"指定されたホスト上で現在進行中の build, snapshot, migrate, resize の操作数を" -"元に計算される、Compute のキャパシティキャッシュの1要素。" - -msgid "" -"An encrypted communications protocol for secure communication over a " -"computer network, with especially wide deployment on the Internet. " -"Technically, it is not a protocol in and of itself; rather, it is the result " -"of simply layering the Hypertext Transfer Protocol (HTTP) on top of the TLS " -"or SSL protocol, thus adding the security capabilities of TLS or SSL to " -"standard HTTP communications. most OpenStack API endpoints and many inter-" -"component communications support HTTPS communication." -msgstr "" -"コンピューターネットワークで、とくにインターネットで広く使われている、安全に" -"通信を行うための暗号化通信プロトコル。技術的には、プロトコルではなく、むしろ" -"シンプルに SSL/TLS プロトコルの上に Hypertext Transfer Protocol (HTTP) を重ね" -"ているものである。そのため、SSL や TLS プロトコルのセキュリティー機能を標準的" -"な HTTP 通信に追加したものである。ほとんどの OpenStack API エンドポイントや多" -"くのコンポーネント間通信で、 HTTPS 通信がサポートされている。" - -msgid "" -"An entity in the context of the Shared File Systems that encapsulates " -"interaction with the Networking service. If the driver you selected runs in " -"the mode requiring such kind of interaction, you need to specify the share " -"network to create a share." -msgstr "" -"Shared File System サービスにおいて、Networking サービスとのやり取りを抽象化" -"するエンティティー。選択したドライバーが Networking サービスとのやり取りを必" -"要とするモードで動作している場合、共有を作成する際にネットワーク共有 (share " -"network) を指定する必要がある。" - -msgid "" -"An entity that maps Object Storage data to partitions. A separate ring " -"exists for each service, such as account, object, and container." -msgstr "" -"Object Storage データのパーティションへのマッピングを行う。アカウント、オブ" -"ジェクト、コンテナーというサービス単位に別々のリングが存在する。" - -msgid "An extra but helpful piece of practical advice." -msgstr "おまけですが、役に立つ実用的な助言です。" - -msgid "An iSCSI authentication method supported by Compute." -msgstr "Compute によりサポートされる iSCSI の認証方式。" - -msgid "" -"An in-progress specification for cloud management. Currently unsupported in " -"OpenStack." -msgstr "策定中のクラウド管理の仕様。現在、OpenStack では未サポート。" - -msgid "" -"An integrated project that aims to orchestrate multiple cloud applications " -"for OpenStack." -msgstr "" -"OpenStack に複数のクラウドアプリケーションをオーケストレーションする為に開発" -"されたプロジェクト。" - -msgid "" -"An integrated project that orchestrates multiple cloud applications for " -"OpenStack. The project name of Orchestration is heat." 
-msgstr "" -"OpenStack 向けに複数のクラウドアプリケーションをオーケストレーションする統合" -"プロジェクト。Orchestration のプロジェクト名は heat。" - -msgid "" -"An integrated project that provide scalable and reliable Cloud Database-as-a-" -"Service functionality for both relational and non-relational database " -"engines. The project name of Database service is trove." -msgstr "" -"リレーショナルデータベースと非リレーショナルデータベースの両エンジンに対し" -"て、スケール可能かつ信頼できるクラウド Database-as-a-Service を提供する統合プ" -"ロジェクト。この Database service の名前は trove。" - -msgid "" -"An integrated project that provides metering and measuring facilities for " -"OpenStack. The project name of Telemetry is ceilometer." -msgstr "" -"OpenStack にメータリングと計測の機能を提供する、統合プロジェクト。Telemetry " -"のプロジェクト名は ceilometer。" - -msgid "" -"An interface that is plugged into a port in a Networking network. Typically " -"a virtual network interface belonging to a VM." -msgstr "" -"Networking のネットワークにおけるポートに差し込まれるインターフェース。一般的" -"に、仮想マシンに設定された仮想ネットワークインターフェース。" - -msgid "" -"An object state in Object Storage where a new replica of the object is " -"automatically created due to a drive failure." -msgstr "" -"ドライブ故障により、オブジェクトの新しい複製が自動的に作成された、Object " -"Storage のオブジェクトの状態。" - -msgid "An object within Object Storage that is larger than 5 GB." -msgstr "5 GB より大きい Object Storage 内のオブジェクト。" - -msgid "" -"An official OpenStack service defined as core by DefCore Committee. " -"Currently, consists of Block Storage service (cinder), Compute service " -"(nova), Identity service (keystone), Image service (glance), Networking " -"service (neutron), and Object Storage service (swift)." -msgstr "" -"正式な OpenStack サービスのうち、DefCore 委員会によりコアと定義されているサー" -"ビス。現時点では、 Block Storage サービス (cinder)、 Compute サービス " -"(nova)、 Identity サービス (keystone)、 Image サービス (glance)、 Networking " -"サービス (neutron)、 Object Storage サービス (swift) です。" - -msgid "" -"An official OpenStack service defined as optional by DefCore Committee. " -"Currently, consists of Dashboard (horizon), Telemetry service (Telemetry), " -"Orchestration service (heat), Database service (trove), Bare Metal service " -"(ironic), and so on." -msgstr "" -"正式な OpenStack サービスのうち、DefCore 委員会によりオプションと定義されてい" -"るサービス。現時点では、 Dashboard (horizon), Telemetry サービス " -"(Telemetry), Orchestration サービス (heat), Database サービス (trove), Bare " -"Metal サービス(ironic) などです。" - -msgid "An open source LDAP server. Supported by both Compute and Identity." -msgstr "" -"オープンソース LDAP サーバー。Compute と Identity によりサポートされる。" - -msgid "An open source SQL toolkit for Python, used in OpenStack." -msgstr "" -"OpenStack で使われている、オープンソースの Python 用 SQL ツールキット。" - -msgid "" -"An open source community project by Dell that aims to provide all necessary " -"services to quickly deploy clouds." -msgstr "" -"クラウドの迅速なデプロイ用に全ての必要なサービスを提供する用途の、Dell による" -"オープンソースコミュニティプロジェクト。" - -msgid "" -"An operating system configuration management tool supporting OpenStack " -"deployments." -msgstr "" -"OpenStack の導入をサポートするオペレーティングシステムの設定管理ツール。" - -msgid "" -"An operating system configuration-management tool supported by OpenStack." -msgstr "OpenStackがサポートするオペレーティングシステム構成管理ツール。" - -msgid "An operating system instance running under the control of a hypervisor." -msgstr "" -"ハイパーバイザーの管理下で実行しているオペレーティングシステムのインスタン" -"ス。" - -msgid "" -"An operating system instance that runs on top of a hypervisor. Multiple VMs " -"can run at the same time on the same physical host." 
-msgstr "" -"ハイパーバイザー上で動作するオペレーティングシステムインスタンス。一台の物理" -"ホストで同時に複数の VM を実行できる。" - -msgid "" -"An option within Compute that enables administrators to create and manage " -"users through the ``nova-manage`` command as opposed to using the Identity " -"service." -msgstr "" -"管理者が、Identity を使用する代わりに、 ``nova-manage`` コマンド経由でユー" -"ザーを作成および管理できる、Compute 内のオプション。" - -msgid "" -"An option within Image service so that an image is deleted after a " -"predefined number of seconds instead of immediately." -msgstr "" -"イメージをすぐに削除する代わりに、事前定義した秒数経過後に削除するための、" -"Image service 内のオプション。" - -msgid "Analytics-as-a-Service for ad-hoc or bursty analytic workloads." -msgstr "" -"その場限りやバースト的な分析ワークロードに対応できる Analytics-as-a-Service" - -msgid "" -"Another option is to use the unofficial binary installer provided by " -"Christoph Gohlke (http://www.lfd.uci.edu/~gohlke/pythonlibs/#pip)." -msgstr "" -"もう 1 つの選択肢は、Christoph Gohlke さんにより提供されている非公式バイナ" -"リーインストーラー (http://www.lfd.uci.edu/~gohlke/pythonlibs/ #pip) を使用す" -"ることです。" - -msgid "Anvil" -msgstr "Anvil" - -msgid "" -"Any business that provides Internet access to individuals or businesses." -msgstr "個人や組織にインターネットアクセスを提供する何らかのビジネス。" - -msgid "" -"Any client software that enables a computer or device to access the Internet." -msgstr "" -"コンピューターやデバイスがインターネットにアクセスできる、何らかのクライアン" -"トソフトウェア。" - -msgid "Any compute node that runs the network worker daemon." -msgstr "ネットワークワーカーデーモンを実行するコンピュートノードすべて。" - -msgid "" -"Any deployment-specific information is helpful, such as whether you are " -"using Ubuntu 14.04 or are performing a multi-node installation." -msgstr "" -"環境固有の情報が役に立ちます。例えば、Ubuntu 14.04 の利用有無、複数ノードのイ" -"ンストール有無です。" - -msgid "" -"Any kind of text that contains a link to some other site, commonly found in " -"documents where clicking on a word or words opens up a different website." -msgstr "" -"どこか別のサイトへのリンクを含む、ある種のテキスト。一般的に、別の Web サイト" -"を開く言葉をクリックするドキュメントに見られる。" - -msgid "Any node running a daemon or worker that provides an API endpoint." -msgstr "" -"API エンドポイントを提供するデーモンまたはワーカーを実行するあらゆるノード。" - -msgid "" -"Any piece of hardware or software that wants to connect to the network " -"services provided by Networking, the network connectivity service. An entity " -"can make use of Networking by implementing a VIF." -msgstr "" -"Networking により提供されるネットワークサービス、ネットワーク接続性サービスに" -"接続したい、ハードウェアやソフトウェアの部品。エンティティーは、仮想インター" -"フェースを実装することにより Networking を使用できる。" - -msgid "" -"Any user, including the ``root`` user, can run commands that are prefixed " -"with the ``$`` prompt." -msgstr "" -"``$`` プロンプトから始まるコマンドは、 ``root`` ユーザーを含むすべてのユー" -"ザーが実行できます。" - -msgid "Apache" -msgstr "Apache" - -msgid "" -"Apache Hadoop is an open source software framework that supports data-" -"intensive distributed applications." -msgstr "" -"Apache Hadoop は、データインテンシブな分散アプリケーションをサポートする、" -"オープンソースソフトウェアフレームワークである。" - -msgid "Apache License 2.0" -msgstr "Apache License 2.0" - -msgid "Apache Web Server" -msgstr "Apache Web Server" - -msgid "Application Catalog service" -msgstr "Application Catalog サービス" - -msgid "Application Programming Interface (API)" -msgstr "Application Programming Interface (API)" - -msgid "Application Service Provider (ASP)" -msgstr "Application Service Provider (ASP)" - -msgid "" -"Arbitrary property to associate with image. This option can be used multiple " -"times." -msgstr "" -"イメージと関連付ける任意のプロパティ。このオプションは複数回使用できます。" - -msgid "" -"As a cloud end user, you can use the OpenStack dashboard to provision your " -"own resources within the limits set by administrators. 
You can modify the " -"examples provided in this section to create other types and sizes of server " -"instances." -msgstr "" -"あなたはクラウドのエンドユーザーとして、OpenStack dashboard を使用できます。" -"管理者により設定された制限の範囲内で自身のリソースを展開できます。他の種類や" -"大きさのサーバーインスタンスを作成するために、このセクションで提供される例を" -"変更できます。" - -msgid "" -"As an administrator, you can migrate a volume with its data from one " -"location to another in a manner that is transparent to users and workloads. " -"You can migrate only detached volumes with no snapshots." -msgstr "" -"管理者は、ユーザーに意識させず、またワークロードの中断なしに、データを含めた" -"状態でボリュームを別の場所に移動することができます。スナップショットを持たな" -"い、切断されているボリュームだけが移動できます。" - -msgid "" -"As shown in :ref:`get_started_conceptual_architecture`, OpenStack consists " -"of several independent parts, named the OpenStack services. All services " -"authenticate through a common Identity service. Individual services interact " -"with each other through public APIs, except where privileged administrator " -"commands are necessary." -msgstr "" -":ref:`get_started_conceptual_architecture` にあるように、 OpenStack は " -"OpenStack サービスと呼ばれる複数の独立した部品で構成されています。すべての" -"サービスは共通の Identity service を通して認証を行います。個々のサービスは、" -"パブリック API を通じて互いに連携します。ただし、特権管理コマンドが必要な場合" -"もいくつかあります。" - -msgid "" -"As the volume donor, request a volume transfer authorization code for a " -"specific volume:" -msgstr "" -"ボリュームの譲渡元として、特定のボリュームのボリューム転送認証コードを要求し" -"ます。" - -msgid "" -"As the volume recipient, you must first obtain the transfer ID and " -"authorization key from the original owner." -msgstr "" -"ボリュームの受取側として、まず、元の所有者から転送 ID と認証キーを取得する必" -"要があります。" - -msgid "" -"Association of an interface ID to a logical port. Plugs an interface into a " -"port." -msgstr "" -"論理ポートへのインターフェースIDの紐付け。インターフェースをポートに差し込" -"む。" - -msgid "Asynchronous JavaScript and XML (AJAX)" -msgstr "Asynchronous JavaScript and XML (AJAX)" - -msgid "Attach a volume to an instance" -msgstr "ボリュームのインスタンスへの接続" - -msgid "" -"Attach your volume to a server, specifying the server ID and the volume ID:" -msgstr "" -" ボリュームをサーバーに接続し、サーバー ID とボリューム ID を指定します。" - -msgid "" -"Attachment point where a virtual interface connects to a virtual network." -msgstr "仮想ネットワークへの仮想インタフェースの接続ポイント。" - -msgid "Austin" -msgstr "Austin" - -msgid "AuthN" -msgstr "AuthN" - -msgid "AuthZ" -msgstr "AuthZ" - -msgid "" -"Authentication and identity service by Microsoft, based on LDAP. Supported " -"in OpenStack." -msgstr "" -"Microsoft が提供する認証サービス。LDAP に基づいている。OpenStack でサポートさ" -"れる。" - -msgid "Authentication method that uses keys rather than passwords." -msgstr "パスワードの代わりに鍵を使用する認証方式。" - -msgid "" -"Authentication method that uses two or more credentials, such as a password " -"and a private key. Currently not supported in Identity." -msgstr "" -"パスワードと秘密鍵など、2 つ以上のクレデンシャルを使用する認証方式。Identity " -"では現在サポートされていない。" - -msgid "Auto ACK" -msgstr "自動 ACK" - -msgid "" -"Automated software test suite designed to run against the trunk of the " -"OpenStack core project." -msgstr "" -"OpenStack コアプロジェクトの trunk ブランチに対してテストを実行するために設計" -"された自動ソフトウェアテストスイート。" - -msgid "Available from anywhere" -msgstr "どこからでも利用可能" - -msgid "Available instance types" -msgstr "利用できるインスタンス種別" - -msgid "Available networks" -msgstr "利用可能なネットワーク" - -msgid "BMC" -msgstr "BMC" - -msgid "Bare Metal service" -msgstr "Bare Metal サービス" - -msgid "" -"Baseboard Management Controller. The intelligence in the IPMI architecture, " -"which is a specialized micro-controller that is embedded on the motherboard " -"of a computer and acts as a server. 
Manages the interface between system " -"management software and platform hardware." -msgstr "" -"ベースボード・マネジメント・コントローラー。IPMI アーキテクチャーにおける管理" -"機能。コンピューターのマザーボードに埋め込まれ、サーバーとして動作する、特別" -"なマイクロコントローラーである。システム管理ソフトウェアとプラットフォーム" -"ハードウェアの間の通信を管理する。" - -msgid "" -"Be sure to include the software and package versions that you are using, " -"especially if you are using a development branch, such as, ``\"Kilo release" -"\" vs git commit bc79c3ecc55929bac585d04a03475b72e06a3208``." -msgstr "" -"使用しているソフトウェアとパッケージのバージョンを明確にします。とくに開発ブ" -"ランチを使用している場合は、``\"Kilo release\" vs git commit " -"bc79c3ecc55929bac585d04a03475b72e06a3208`` などを明確にします。" - -msgid "" -"Before you can run client commands, you must create and source the ``PROJECT-" -"openrc.sh`` file to set environment variables. See :doc:`../common/" -"cli_set_environment_variables_using_openstack_rc`." -msgstr "" -"クライアントコマンドを実行する前に、環境変数を設定するために、``PROJECT-" -"openrc.sh`` ファイルを作成して読み込む必要があります。詳細は :doc:`../common/" -"cli_set_environment_variables_using_openstack_rc` を参照してください。" - -msgid "Bell-LaPadula model" -msgstr "Bell-LaPadula モデル" - -msgid "" -"Belongs to a particular domain and is used to specify information about the " -"domain. There are several types of DNS records. Each record type contains " -"particular information used to describe the purpose of that record. Examples " -"include mail exchange (MX) records, which specify the mail server for a " -"particular domain; and name server (NS) records, which specify the " -"authoritative name servers for a domain." -msgstr "" -"特定のドメインに属し、ドメインに関する情報を指定するために使用される。いくつ" -"かの種類の DNS レコードがある。各レコード種別は、そのレコードの目的を説明する" -"ために使用される特定の情報を含む。例えば、mail exchange (MX) レコードは、特定" -"のドメインのメールサーバーを指定する。name server (NS) レコードは、ドメインの" -"権威ネームサーバーを指定する。" - -msgid "Benchmark service" -msgstr "Benchmark サービス" - -msgid "Bexar" -msgstr "Bexar" - -msgid "Block Storage API" -msgstr "Block Storage API" - -msgid "Block Storage service" -msgstr "Block Storage サービス" - -msgid "Block storage (cinder)" -msgstr "Block storage (cinder)" - -msgid "" -"Block storage that is simultaneously accessible by multiple clients, for " -"example, NFS." -msgstr "" -"複数のクライアントにより同時にアクセス可能なブロックストレージ。例えば NFS。" - -msgid "Bootstrap Protocol (BOOTP)" -msgstr "Bootstrap Protocol (BOOTP)" - -msgid "Border Gateway Protocol (BGP)" -msgstr "Border Gateway Protocol (BGP)" - -msgid "" -"Both Image service and Compute support encrypted virtual machine (VM) images " -"(but not instances). In-transit data encryption is supported in OpenStack " -"using technologies such as HTTPS, SSL, TLS, and SSH. Object Storage does not " -"support object encryption at the application level but may support storage " -"that uses disk encryption." -msgstr "" -"Image service と Compute は、どちらも仮想マシンイメージ (インスタンスではな" -"い) の暗号化をサポートする。転送中のデータ暗号は、HTTPS、SSL、TLS、SSH などの" -"技術を使用して、OpenStack においてサポートされる。Object Storage は、アプリ" -"ケーションレベルでオブジェクト暗号化をサポートしませんが、ディスク暗号化を使" -"用するストレージをサポートする可能性がある。" - -msgid "Both a VM container format and disk format. Supported by Image service." -msgstr "" -"仮想マシンのコンテナー形式とディスク形式の両方。Image service によりサポート" -"される。" - -msgid "" -"Bring down a physical storage device for maintenance without disrupting " -"workloads." -msgstr "" -" ワークロードを中断せずにメンテナンスを行えるように物理ストレージデバイスを停" -"止する 。" - -msgid "" -"Builds and manages rings within Object Storage, assigns partitions to " -"devices, and pushes the configuration to other storage nodes." 
-msgstr "" -"Object Storage のリングの作成、管理を行い、パーティションのデバイスへの割り当" -"てを行い、他のストレージノードに設定を転送する。" - -msgid "" -"By default the help URL points to http://docs.openstack.org. Change this by " -"editing the following attribute to the URL of your choice in " -"``local_settings.py``:" -msgstr "" -"ヘルプの URL は、デフォルトでは http://docs.openstack.org を参照しています。" -"これを変更するには、 ``local_settings.py`` で以下の属性をお好みの URL に変更" -"します。" - -msgid "CA" -msgstr "CA" - -msgid "CADF" -msgstr "CADF" - -msgid "CALL" -msgstr "CALL" - -msgid "CAST" -msgstr "CAST" - -msgid "CMDB" -msgstr "CMDB" - -msgid "Cactus" -msgstr "Cactus" - -msgid "" -"Can concurrently use multiple layer-2 networking technologies, such as " -"802.1Q and VXLAN, in Networking." -msgstr "" -"Networking において、802.1Q や VXLAN などの複数の L2 ネットワーク技術を同時に" -"使用できる。" - -msgid "" -"Causes the network interface to pass all traffic it receives to the host " -"rather than passing only the frames addressed to it." -msgstr "" -"ネットワークインターフェースが、そこを指定されたフレームだけではなく、ホスト" -"に届いたすべての通信を渡すようにする。" - -msgid "CentOS" -msgstr "CentOS" - -msgid "Ceph" -msgstr "Ceph" - -msgid "" -"Ceph component that enables a Linux block device to be striped over multiple " -"distributed data stores." -msgstr "" -"Linux ブロックデバイスが複数の分散データストアにわたり分割できるようにする、" -"Ceph のコンポーネント。" - -msgid "CephFS" -msgstr "CephFS" - -msgid "" -"Certificate Authority or Certification Authority. In cryptography, an entity " -"that issues digital certificates. The digital certificate certifies the " -"ownership of a public key by the named subject of the certificate. This " -"enables others (relying parties) to rely upon signatures or assertions made " -"by the private key that corresponds to the certified public key. In this " -"model of trust relationships, a CA is a trusted third party for both the " -"subject (owner) of the certificate and the party relying upon the " -"certificate. CAs are characteristic of many public key infrastructure (PKI) " -"schemes." -msgstr "" -"認証局。暗号において、電子証明書を発行するエンティティー。電子証明書は、証明" -"書の発行先の名前により公開鍵の所有者を証明する。これにより、他の信頼される機" -"関が証明書を信頼できるようになる。また、証明された公開鍵に対応する秘密鍵によ" -"る表明を信頼できるようになる。この信頼関係のモデルにおいて、CA は証明書の発行" -"先と証明書を信頼している機関の両方に対する信頼された第三者機関である。CA は、" -"多くの公開鍵基盤 (PKI) スキームの特徴である。" - -msgid "Challenge-Handshake Authentication Protocol (CHAP)" -msgstr "Challenge-Handshake Authentication Protocol (CHAP)" - -msgid "" -"Change the colors and image file names as appropriate, though the relative " -"directory paths should be the same. The following example file shows you how " -"to customize your CSS file:" -msgstr "" -"カラーとイメージファイルの名前を適切に変更します。ただし、相対ディレクトリー" -"パスは同じにすべきです。以下のサンプルファイルに、CSS ファイルをどのようにカ" -"スタマイズするかを示します。" - -msgid "Changes to these types of disk volumes are saved." -msgstr "この種類のディスクボリュームに変更すると、データが保存される。" - -msgid "" -"Checks for and deletes unused VMs; the component of Image service that " -"implements delayed delete." -msgstr "" -"未使用の仮想マシンを確認し、削除する。遅延削除を実装する、Image service のコ" -"ンポーネント。" - -msgid "" -"Checks for missing replicas and incorrect or corrupted objects in a " -"specified Object Storage account by running queries against the back-end " -"SQLite database." -msgstr "" -"バックエンドの SQLite データベースに問い合わせることにより、指定された " -"Object Storage のアカウントに、レプリカの欠損やオブジェクトの不整合・破損がな" -"いかを確認する。" - -msgid "" -"Checks for missing replicas or incorrect objects in specified Object Storage " -"containers through queries to the SQLite back-end database." 
-msgstr "" -"SQLite バックエンドデータベースへの問い合わせにより、指定した Object Storage " -"コンテナーにおいてレプリカの欠損やオブジェクトの不整合がないかを確認する。" - -msgid "Chef" -msgstr "Chef" - -msgid "" -"Choosing a host based on the existence of a GPU is currently unsupported in " -"OpenStack." -msgstr "GPU の有無によりホストを選択することは、現在 OpenStack で未サポート。" - -msgid "CirrOS" -msgstr "CirrOS" - -msgid "Cisco neutron plug-in" -msgstr "Cisco neutron プラグイン" - -msgid "Client" -msgstr "クライアント" - -msgid "" -"Cloud Auditing Data Federation (CADF) is a specification for audit event " -"data. CADF is supported by OpenStack Identity." -msgstr "" -"Cloud Auditing Data Federation (CADF) は、監査イベントデータの仕様である。" -"CADF は OpenStack Identity によりサポートされる。" - -msgid "Cloud Data Management Interface (CDMI)" -msgstr "" -"クラウドデータ管理インターフェース (CDMI:Cloud Data Management Interface)" - -msgid "Cloud Infrastructure Management Interface (CIMI)" -msgstr "Cloud Infrastructure Management Interface (CIMI)" - -msgid "Cloudbase-Init" -msgstr "Cloudbase-Init" - -msgid "Clustering service" -msgstr "Clustering service" - -msgid "Code name for the DNS service project for OpenStack." -msgstr "OpenStack の DNS サービスプロジェクトのコード名。" - -msgid "" -"Code name for the OpenStack project that provides the Containers Service." -msgstr "コンテナーサービスを提供する OpenStack プロジェクトのコード名。" - -msgid "Code name of the key management service for OpenStack." -msgstr "OpenStack の key management サービスのコード名。" - -msgid "" -"Collection of Compute components that represent the global state of the " -"cloud; talks to services, such as Identity authentication, Object Storage, " -"and node/storage workers through a queue." -msgstr "" -"クラウドの全体状況を表す Compute コンポーネント群。キュー経由で、Identity の" -"認証、Object Storage、ノード/ストレージワーカーなどのサービスと通信する。" - -msgid "" -"Collective name for the Object Storage object services, container services, " -"and account services." -msgstr "" -"Object Storage のオブジェクトサービス、コンテナーサービス、アカウントサービス" -"の集合名。" - -msgid "" -"Collective term for Object Storage components that provide additional " -"functionality." -msgstr "追加機能を提供する Object Storage のコンポーネントの総称。" - -msgid "" -"Collective term for a group of Object Storage components that processes " -"queued and failed updates for containers and objects." -msgstr "" -"キュー済みや失敗した、コンテナーやオブジェクトに対する更新を処理する、Object " -"Storage のコンポーネントのグループの総称。" - -msgid "" -"Collects event and metering data by monitoring notifications sent from " -"services." -msgstr "" -"サービスから送信される通知を監視して、イベントと計測データを収集します。" - -msgid "" -"Combination of a URI and UUID used to access Image service VM images through " -"the image API." -msgstr "" -"Image API 経由で Image service の仮想マシンイメージにアクセスするために使用さ" -"れる、URI や UUID の組み合わせ。" - -msgid "Command prompts" -msgstr "コマンドプロンプト" - -msgid "Common Internet File System (CIFS)" -msgstr "Common Internet File System (CIFS)" - -msgid "" -"Community project that captures Compute AMQP communications; useful for " -"debugging." -msgstr "" -"Compute AMQP 通信をキャプチャーする、コミュニティーのプロジェクト。デバッグに" -"有用。" - -msgid "" -"Community project that uses shell scripts to quickly build complete " -"OpenStack development environments." -msgstr "" -"シェルスクリプトを使用して、完全な OpenStack 導入環境を迅速に構築するためのコ" -"ミュニティープロジェクト。" - -msgid "" -"Community project used to run automated tests against the OpenStack API." -msgstr "" -"OpenStack API に対して自動テストを実行するために使用されるコミュニティープロ" -"ジェクト。" - -msgid "Community support" -msgstr "コミュニティーのサポート" - -msgid "" -"Companies that rent specialized applications that help businesses and " -"organizations provide additional services with lower cost." 
-msgstr "" -"企業や組織を支援する特定のアプリケーションを貸し出す会社が、より低いコストで" -"追加サービスを提供する。" - -msgid "" -"Component of Identity that provides a rule-management interface and a rule-" -"based authorization engine." -msgstr "" -"ルール管理インターフェースやルールベースの認可エンジンを提供する Identity の" -"コンポーネント。" - -msgid "Compute API" -msgstr "Compute API" - -msgid "Compute service" -msgstr "Compute サービス" - -msgid "" -"Computer that provides explicit services to the client software running on " -"that system, often managing a variety of computer operations. A server is a " -"VM instance in the Compute system. Flavor and image are requisite elements " -"when creating a server." -msgstr "" -"そのシステムにおいて動作しているクライアントソフトウェアに具体的なサービスを" -"提供するコンピューター。さまざまなコンピューター処理を管理することもある。" -"サーバーは、Compute システム上の仮想マシンインスタンスです。フレーバーとイ" -"メージは、サーバーの作成時に必須の要素です。" - -msgid "Conceptual architecture" -msgstr "概念アーキテクチャー" - -msgid "" -"Configurable option within Object Storage to limit database writes on a per-" -"account and/or per-container basis." -msgstr "" -"アカウントごと、コンテナーごとにデータベースへの書き込みを制限するための、" -"Object Storage 内の設定オプション。" - -msgid "Configuration Management Database." -msgstr "構成管理データベース。" - -msgid "" -"Configuration setting within RabbitMQ that enables or disables message " -"acknowledgment. Enabled by default." -msgstr "" -"メッセージ ACK を有効化または無効化する、RabbitMQ 内の設定。デフォルトで有" -"効。" - -msgid "Configures networks for guest servers." -msgstr "ゲストサーバー用のネットワークの作成と管理。" - -msgid "" -"Connected to by a direct consumer in RabbitMQ—Compute, the message can be " -"consumed only by the current connection." -msgstr "" -"RabbitMQ—Compute において直接利用者により接続される。メッセージは、現在の接続" -"だけにより使用される。" - -msgid "Container servers (swift-container-server)" -msgstr "コンテナーサーバー (swift-container-server)" - -msgid "Containers service" -msgstr "Containers サービス" - -msgid "" -"Contains configuration information that Object Storage uses to reconfigure a " -"ring or to re-create it from scratch after a serious failure." -msgstr "" -"リングを再設定するため、深刻な障害の後に最初から再作成するために、Object " -"Storage が使用する設定情報を含む。" - -msgid "" -"Contains information about a user as provided by the identity provider. It " -"is an indication that a user has been authenticated." -msgstr "" -"認証プロバイダーにより提供されたとおり、ユーザーに関する情報を含む。ユーザー" -"が認証済みであることを意味する。" - -msgid "" -"Contains the locations of all Object Storage partitions within the ring." -msgstr "リング内にあるすべての Object Storage のパーティションの場所を含む。" - -msgid "Contains the output from a Linux VM console in Compute." -msgstr "Compute の Linux 仮想マシンコンソールからの出力を含む。" - -msgid "Contractual obligations that ensure the availability of a service." -msgstr "サービスの可用性を保証する契約上の義務。" - -msgid "Conventions" -msgstr "表記規則" - -msgid "" -"Converts an existing server to a different flavor, which scales the server " -"up or down. The original server is saved to enable rollback if a problem " -"occurs. All resizes must be tested and explicitly confirmed, at which time " -"the original server is removed." -msgstr "" -"既存のサーバーを別のフレーバーに変更する。サーバーをスケールアップまたはス" -"ケールダウンする。元のサーバーは、問題発生時にロールバックできるよう保存され" -"る。すべてのリサイズは、元のサーバーを削除するときに、テストされ、明示的に確" -"認される必要がある。" - -msgid "" -"Copy the ``PROJECT-openrc.sh`` file to the computer from which you want to " -"run OpenStack commands." -msgstr "" -"OpenStack コマンドを実行したいコンピューターに ``PROJECT-openrc.sh`` ファイル" -"をコピーします。" - -msgid "" -"Create a CSS style sheet in ``/usr/share/openstack-dashboard/" -"openstack_dashboard/static/dashboard/scss/``." 
-msgstr "" -"CSS スタイルシートを ``/usr/share/openstack-dashboard/openstack_dashboard/" -"static/dashboard/scss/`` に作成します。" - -msgid "" -"Create a Database service instance using the :command:`trove create` command." -msgstr "" -":command:`trove create` コマンドを使用して、Database サービスのインスタンスを" -"作成します。" - -msgid "" -"Create a custom bootable volume or a volume with a large data set and " -"transfer it to a customer." -msgstr "" -"大容量のデータセットが含まれるボリュームやカスタムのブータブルボリュームを作" -"成して顧客に転送する。" - -msgid "Create a volume" -msgstr "ボリュームの作成" - -msgid "Create a volume from specified volume type" -msgstr "指定したボリューム種別からのボリューム作成" - -msgid "Create a volume transfer request" -msgstr "ボリューム譲渡要求の作成" - -msgid "" -"Create a volume with 8 gibibytes (GiB) of space, and specify the " -"availability zone and image:" -msgstr "" -" 容量 8 GiB のボリュームを作成し、アベイラビリティゾーンとイメージを指定しま" -"す。 " - -msgid "" -"Create an image for each type of database. For example, one for MySQL and " -"one for MongoDB." -msgstr "" -"各種データベース用のイメージを作成します。例えば、MySQL 用のもの、MongoDB 用" -"のものです。" - -msgid "Create and source the OpenStack RC file" -msgstr "OpenStack RC ファイルの作成と読み込み" - -msgid "Create or update an image (glance)" -msgstr "イメージ (glance) の作成・更新" - -msgid "" -"Create two PNG logo files with transparent backgrounds using the following " -"sizes:" -msgstr "以下の 2 つの大きさの透過 PNG ロゴファイルを作成します。" - -msgid "" -"Creates a full Object Storage development environment within a single VM." -msgstr "単一の仮想マシンに一通りの Object Storage 開発環境を作成すること。" - -msgid "Creates and collects measurements across OpenStack." -msgstr "OpenStack 全体の計測項目を作成、収集します。" - -msgid "Creates and manages Hadoop clusters on OpenStack." -msgstr "OpenStack 上への Hadoop クラスターの作成と管理。" - -msgid "Creates and manages applications." -msgstr "アプリケーションの作成と管理。" - -msgid "Creates and manages clustering services." -msgstr "クラスタリングサービスの作成と管理。" - -msgid "Creates and manages containers." -msgstr "コンテナーの作成と管理。" - -msgid "Creates and manages databases." -msgstr "データベースの作成と管理。" - -msgid "Creates and manages images, instances, and flavors." -msgstr "イメージ、インスタンス、フレーバーの作成と管理。" - -msgid "Creates and manages images." -msgstr "イメージの作成と管理。" - -msgid "Creates and manages keys." -msgstr "鍵の作成と管理。" - -msgid "Creates and manages shared file systems." -msgstr "共有ファイルシステムを作成および管理します。" - -msgid "Creates and manages users, tenants, roles, endpoints, and credentials." -msgstr "ユーザー、テナント、ロール、エンドポイント、認証情報の作成と管理。" - -msgid "Creates and manages volumes." -msgstr "ボリュームの作成と管理。" - -msgid "Critical information about the risk of data loss or security issues." -msgstr "データ損失やセキュリティー問題のリスクに関する致命的な情報です。" - -msgid "Cross-Origin Resource Sharing (CORS)" -msgstr "Cross-Origin Resource Sharing (CORS)" - -msgid "Crowbar" -msgstr "Crowbar" - -msgid "" -"Currently the libvirt virtualization tool determines the disk, CD-ROM, and " -"VIF device models based on the configured hypervisor type (``libvirt_type`` " -"in ``/etc/nova/nova.conf`` file). For the sake of optimal performance, " -"libvirt defaults to using virtio for both disk and VIF (NIC) models. The " -"disadvantage of this approach is that it is not possible to run operating " -"systems that lack virtio drivers, for example, BSD, Solaris, and older " -"versions of Linux and Windows." 
-msgstr "" -"現在、libvirt 仮想化ツールは、設定したハイパーバイザーの種別 (``/etc/nova/" -"nova.conf`` ファイルの ``libvirt_type`` で指定)を基にディスク、CD_ROM、VIF デ" -"バイスモデルを決定します。最適なパフォーマンスを実現するため、libvirt はディ" -"スクおよび VIF (NIC) モデルのいずれの場合も virtio を使用するように初期設定さ" -"れています。この手法の欠点は、virtio ドライバーがないオペレーティングシステ" -"ム (例、BSD、Solaris、Linux および Windows の以前のバージョン) を実行できない" -"点です。" - -msgid "Currently, the clients do not support Python 3." -msgstr "現在、クライアントは Python 3 をサポートしません。" - -msgid "Custom modules that extend some OpenStack core APIs." -msgstr "いくつかの OpenStack コア API を拡張するカスタムモジュール。" - -msgid "Customize the dashboard" -msgstr "ダッシュボードのカスタマイズ" - -msgid "DAC" -msgstr "DAC" - -msgid "DHCP" -msgstr "DHCP" - -msgid "DHCP agent" -msgstr "DHCP エージェント" - -msgid "DNS" -msgstr "DNS" - -msgid "DNS record" -msgstr "DNS レコード" - -msgid "DNS service" -msgstr "DNS サービス" - -msgid "DRTM" -msgstr "DRTM" - -msgid "" -"Daemon that provides DNS, DHCP, BOOTP, and TFTP services for virtual " -"networks." -msgstr "" -"仮想ネットワーク向けに DNS、DHCP、BOOTP、TFTP サービスを提供するデーモン。" - -msgid "Dashboard" -msgstr "Dashboard" - -msgid "Data Processing service" -msgstr "Data Processing サービス" - -msgid "" -"Data that is only known to or accessible by a user and used to verify that " -"the user is who he says he is. Credentials are presented to the server " -"during authentication. Examples include a password, secret key, digital " -"certificate, and fingerprint." -msgstr "" -"ユーザーのみが知っている、またはアクセスできるデータ。ユーザーが正当であるこ" -"とを検証するために使用される。クレデンシャルは、認証中にサーバーに提示され" -"る。例えば、パスワード、秘密鍵、電子証明書、フィンガープリントなどがある。" - -msgid "Database" -msgstr "データベース" - -msgid "Database service" -msgstr "Database サービス" - -msgid "Debian" -msgstr "Debian" - -msgid "" -"Defines resources for a cell, including CPU, storage, and networking. Can " -"apply to the specific services within a cell or a whole cell." -msgstr "" -"CPU、ストレージ、ネットワークを含むセルのリソースを定義する。1セルやセル全体" -"に含まれる特定のサービスに適用可能。" - -msgid "" -"Defining environment variables using an environment file is not a common " -"practice on Microsoft Windows. Environment variables are usually defined in " -"the :menuselection:`Advanced > System Properties` dialog box." -msgstr "" -"環境変数ファイルを用いて環境変数を定義することは、Microsoft Windows で一般的" -"な手法ではありません。環境変数は通常、 :menuselection:`システム > 詳細設定` " -"で定義されます。" - -msgid "Delete a volume" -msgstr "ボリュームの削除" - -msgid "Delete a volume transfer" -msgstr "ボリューム譲渡の削除" - -msgid "Delete the volume using either the volume name or ID:" -msgstr "ボリューム名または ID を使用してボリュームを削除します。" - -msgid "Delete the volume:" -msgstr "ボリュームを削除します。" - -msgid "" -"Deletes all image properties that are not explicitly set in the update " -"request. Otherwise, those properties not referenced are preserved." -msgstr "" -"更新要求に明示的に設定されていないイメージのプロパティをすべて削除します。そ" -"うでない場合は、参照されていないプロパティは保存されます。" - -msgid "" -"Denial of service (DoS) is a short form for denial-of-service attack. This " -"is a malicious attempt to prevent legitimate users from using a service." -msgstr "" -"DoS は、サービス妨害攻撃の省略形である。正当なユーザーがサービスを使用するこ" -"とを妨害するための悪意のある試み。" - -msgid "" -"Depending on context, the core API is either the OpenStack API or the main " -"API of a specific core project, such as Compute, Networking, Image service, " -"and so on." -msgstr "" -"コア API は、文脈に応じて OpenStack API または特定のコアプロジェクトのメイン " -"API を意味する。コアプロジェクトは、Compute、Networking、Image service などが" -"ある。" - -msgid "Deployment service" -msgstr "デプロイサービス" - -msgid "" -"Describes the parameters of the various virtual machine images that are " -"available to users; includes parameters such as CPU, storage, and memory. 
" -"Alternative term for flavor." -msgstr "" -"ユーザが利用可能な様々な仮想マシンイメージのパラメーター(CPU、ストレージ、メ" -"モリ等を含む)を示す。フレーバーの別名。" - -msgid "Description" -msgstr "説明" - -msgid "Description of the snapshot. Defaults to ``None``." -msgstr "スナップショットの説明。デフォルトは ``None`` です。" - -msgid "Designed as an OpenStack component." -msgstr "OpenStack のコンポーネントとして設計されています。" - -msgid "Desktop-as-a-Service" -msgstr "Desktop-as-a-Service" - -msgid "" -"Determines whether back-end members of a VIP pool can process a request. A " -"pool can have several health monitors associated with it. When a pool has " -"several monitors associated with it, all monitors check each member of the " -"pool. All monitors must declare a member to be healthy for it to stay active." -msgstr "" -"仮想 IP プールのバックエンドメンバーがリクエストを処理できるかどうかを判断す" -"る。プールは、それに関連づけられた複数のヘルスモニターを持てる。すべてのモニ" -"ターは、プールのメンバーをお互いに確認する。すべてのモニターは、その稼働状況" -"の健全性であることをメンバーに宣言する必要がある。" - -msgid "DevStack" -msgstr "DevStack" - -msgid "" -"Device plugged into a PCI slot, such as a fibre channel or network card." -msgstr "" -"ファイバーチャネルやネットワークカードなどの PCI スロット内に挿入されるデバイ" -"ス。" - -msgid "Diablo" -msgstr "Diablo" - -msgid "" -"Disables server-side message acknowledgment in the Compute RabbitMQ. " -"Increases performance but decreases reliability." -msgstr "" -"Compute RabbitMQ において、サーバーサイドメッセージ交換を無効化する。性能を向" -"上されるが、信頼性を低下させる。" - -msgid "Discover the version number for a client" -msgstr "クライアントのバージョン番号の確認" - -msgid "" -"Discretionary access control. Governs the ability of subjects to access " -"objects, while enabling users to make policy decisions and assign security " -"attributes. The traditional UNIX system of users, groups, and read-write-" -"execute permissions is an example of DAC." -msgstr "" -"任意アクセス制御。サブジェクトがオブジェクトにアクセスする機能を統制する。" -"ユーザーがポリシーを決定し、セキュリティー属性を割り当てられる。伝統的な " -"UNIX システムのユーザー、グループ、読み書き権限が、DAC の例である。" - -msgid "" -"Disk-based data storage generally represented as an iSCSI target with a file " -"system that supports extended attributes; can be persistent or ephemeral." -msgstr "" -"ディスクを用いたデータストレージ。一般的に、拡張属性をサポートするファイルシ" -"ステムを持つ、iSCSI ターゲットとして利用される。永続的なものと一時的なものが" -"ある。" - -msgid "" -"Disk-based virtual memory used by operating systems to provide more memory " -"than is actually available on the system." -msgstr "" -"システムにおいて実際に利用可能なメモリーより多くのメモリーをオペレーティング" -"システムにより使用されるディスクベースの仮想メモリー。" - -msgid "Distributed block storage system for QEMU, supported by OpenStack." -msgstr "" -"OpenStack によりサポートされる、QEMU 用の分散ブロックストレージシステム。" - -msgid "" -"Distributes partitions proportionately across Object Storage devices based " -"on the storage capacity of each device." -msgstr "" -"各デバイスのストレージキャパシティに基づき、Object Storage デバイスをまたがり" -"パーティションを比例分配する。" - -msgid "Django" -msgstr "Django" - -msgid "Documentation" -msgstr "ドキュメント" - -msgid "Documentation feedback" -msgstr "ドキュメントへのフィードバック" - -msgid "Domain Name System (DNS)" -msgstr "Domain Name System (DNS)" - -msgid "" -"Domain Name System. A hierarchical and distributed naming system for " -"computers, services, and resources connected to the Internet or a private " -"network. Associates a human-friendly names to IP addresses." -msgstr "" -"ドメインネームシステム。インターネットやプライベートネットワークに接続される" -"コンピューター、サービス、リソースの名前を管理する階層化分散システム。人間が" -"理解しやすい名前を IP アドレスに関連付ける。" - -msgid "Download and source the OpenStack RC file" -msgstr "OpenStack RC ファイルのダウンロードと読み込み" - -msgid "Drivers" -msgstr "ドライバー" - -msgid "" -"Drivers or a service back end are integrated to the centralized server. 
They " -"are used for accessing identity information in repositories external to " -"OpenStack, and may already exist in the infrastructure where OpenStack is " -"deployed (for example, SQL databases or LDAP servers)." -msgstr "" -"ドライバーやサービスバックエンドは、中央サーバーと統合されています。ドライ" -"バーは、OpenStack の外部にあるリポジトリーにある認証情報にアクセスするために" -"使用されます。こうしたリポジトリーは、OpenStack が導入されるインフラストラク" -"チャーにすでに存在する場合もあります (例えば、SQL データベースやLDAP サー" -"バー)。" - -msgid "" -"During the set up or testing of OpenStack, you might have questions about " -"how a specific task is completed or be in a situation where a feature does " -"not work correctly. Use the `ask.openstack.org `__ site to ask questions and get answers. When you visit the https://" -"ask.openstack.org site, scan the recently asked questions to see whether " -"your question has already been answered. If not, ask a new question. Be sure " -"to give a clear, concise summary in the title and provide as much detail as " -"possible in the description. Paste in your command output or stack traces, " -"links to screen shots, and any other information which might be useful." -msgstr "" -"OpenStack の構築中やテスト中に、ある作業を行うのにどうすればよいか質問した" -"り、ある機能が正しく動作しない状況になったりするかもしれません。 `ask." -"openstack.org `__ サイトを使用して、質問して回答を" -"得られます。 https://ask.openstack.org にアクセスしたら、最近の質問を検索し" -"て、あなたと同じ質問がすでに回答されているかを確認します。まだの場合、新しく" -"質問します。見出しの概要は明確かつ正確にしてください。そして、説明はできる限" -"り詳細にしてください。コマンドの出力、スタックトレース、スクリーンショットへ" -"のリンク、その他有用な情報を貼り付けてください。" - -msgid "Dynamic Host Configuration Protocol (DHCP)" -msgstr "動的ホスト設定プロトコル(DHCP)" - -msgid "" -"Dynamic Host Configuration Protocol. A network protocol that configures " -"devices that are connected to a network so that they can communicate on that " -"network by using the Internet Protocol (IP). The protocol is implemented in " -"a client-server model where DHCP clients request configuration data, such as " -"an IP address, a default route, and one or more DNS server addresses from a " -"DHCP server." -msgstr "" -"Dynamic Host Configuration Protocol。ネットワークに接続されたデバイスが、その" -"ネットワーク上で IP を使用して通信できるよう、ネットワークデバイスを設定する" -"ネットワークプロトコル。このプロトコルは、クライアントサイドモデルで実装され" -"ている。DHCP クライアントは、IP アドレス、デフォルトルート、1 つ以上の DNS " -"サーバーアドレス設定データを要求する。" - -msgid "Dynamic HyperText Markup Language (DHTML)" -msgstr "Dynamic HyperText Markup Language (DHTML)" - -msgid "Dynamic root of trust measurement." -msgstr "Dynamic root of trust measurement." - -msgid "EBS boot volume" -msgstr "EBS ブートボリューム" - -msgid "EC2" -msgstr "EC2" - -msgid "EC2 API" -msgstr "EC2 API" - -msgid "EC2 Compatibility API" -msgstr "EC2 互換API" - -msgid "EC2 access key" -msgstr "EC2 アクセスキー" - -msgid "EC2 secret key" -msgstr "EC2 シークレットキー" - -msgid "ESXi" -msgstr "ESXi" - -msgid "ETag" -msgstr "ETag" - -msgid "" -"Each OpenStack project provides a command-line client, which enables you to " -"access the project API through easy-to-use commands. For example, the " -"Compute service provides a ``nova`` command-line client." -msgstr "" -"各 OpenStack プロジェクトは、使いやすいコマンドからプロジェクトの API にアク" -"セスできる、コマンドラインクライアントを提供しています。例えば、Compute は " -"``nova`` コマンドラインクライアントを提供しています。" - -msgid "" -"Each OpenStack release has a code name. Code names ascend in alphabetical " -"order: Austin, Bexar, Cactus, Diablo, Essex, Folsom, Grizzly, Havana, " -"Icehouse, Juno, Kilo, Liberty, and Mitaka. Code names are cities or counties " -"near where the corresponding OpenStack design summit took place. An " -"exception, called the Waldon exception, is granted to elements of the state " -"flag that sound especially cool. Code names are chosen by popular vote." 
-msgstr "" -"各 OpenStack リリースはコード名を持つ。コード名はアルファベット順になります。" -"Austin, Bexar, Cactus, Diablo, Essex, Folsom, Grizzly, Havana, Icehouse, " -"Juno, Kilo、Liberty、Mitaka。コード名は、対応する OpenStack デザインサミット" -"が開催された場所の近くにある都市または国である。Waldon 例外と言われる例外は、" -"非常に格好良く聞こえる、状態フラグの要素に保証される。コード名は、一般的な投" -"票により選択される。" - -msgid "Easily scalable for future growth" -msgstr "将来のサイズ増加に応じて容易に拡張可能" - -msgid "Efficiently polls metering data related to OpenStack services." -msgstr "OpenStack サービスに関連する計測データを効率的に取得します。" - -msgid "" -"Either a soft or hard reboot of a server. With a soft reboot, the operating " -"system is signaled to restart, which enables a graceful shutdown of all " -"processes. A hard reboot is the equivalent of power cycling the server. The " -"virtualization platform should ensure that the reboot action has completed " -"successfully, even in cases in which the underlying domain/VM is paused or " -"halted/stopped." -msgstr "" -"サーバーのソフトリブートまたはハードリブート。ソフトリブートの場合、オペレー" -"ティングに再起動のシグナルが送信されます。これにより、すべてのプロセスを穏や" -"かにシャットダウンできます。ハードリブートは、サーバーの強制再起動と同じで" -"す。仮想化プラットフォームは、ベースの仮想マシンが一時停止中の場合や停止中の" -"場合でも、きちんとリブート動作を正常に完了させるべきです。" - -msgid "Elastic Block Storage (EBS)" -msgstr "Elastic Block Storage (EBS)" - -msgid "Element of RabbitMQ that provides a response to an incoming MQ message." -msgstr "送信されてきた MQ メッセージに応答する RabbitMQ の要素。" - -msgid "" -"Enables Compute and Networking integration, which enables Networking to " -"perform network management for guest VMs." -msgstr "" -"Compute と Networking の統合を可能にする。Networking がゲスト仮想マシン用の" -"ネットワークを管理できるようになる。" - -msgid "" -"Enables Compute to communicate with NetApp storage devices through the " -"NetApp OnCommand Provisioning Manager." -msgstr "" -"Compute が NetApp OnCommand Provisioning Manager 経由で NetApp ストレージデバ" -"イスと通信できるようにする。" - -msgid "" -"Enables Network-Connectivity-as-a-Service for other OpenStack services, such " -"as OpenStack Compute. Provides an API for users to define networks and the " -"attachments into them. Has a pluggable architecture that supports many " -"popular networking vendors and technologies." -msgstr "" -"OpenStack Compute のような他の OpenStack サービスに対して、Network-" -"Connectivity-as-a-Service をできるようにします。ユーザーがネットワークやそれ" -"らへの接続を定義するための API を提供します。プラグイン可能なアーキテクチャー" -"で、数多くの人気のあるネットワークベンダーや技術に対応しています。" - -msgid "" -"Enables Networking to distribute incoming requests evenly between designated " -"instances." -msgstr "" -"Networking により、受信リクエストを指定されたインスタンス間で均等に分散できる" -"ようになる。" - -msgid "" -"Enables a Linux bridge to understand a Networking port, interface " -"attachment, and other abstractions." -msgstr "" -"Linux ブリッジが、Networking のポート、インターフェース接続、他の抽象化を理解" -"できるようにする。" - -msgid "Enables users to submit commands as a tenant administrator or end user." -msgstr "" -"ユーザーが、テナント管理者もしくはエンドユーザーとしてコマンドを発行するのに" -"使用します。" - -msgid "" -"Enables users to submit commands to the REST API through a command-line " -"client authorized as either a admin user, reseller user, or swift user." 
-msgstr "" -"ユーザーがコマンドラインクライアントを使って REST API にコマンドを発行するの" -"に使用します。管理者ユーザー、 reseller ユーザー、 swift ユーザーのいずれの" -"ユーザーでも使用できます。" - -msgid "Encryption is available" -msgstr "暗号化が利用可能" - -msgid "Encryption is not available yet" -msgstr "暗号化はまだ利用不可" - -msgid "" -"Ensure that the ``C:\\Python27\\Scripts`` directory is defined in the " -"``PATH`` environment variable, and use the ``easy_install`` command from the " -"setuptools package:" -msgstr "" -"``C:\\Python27\\Scripts`` ディレクトリーが ``PATH`` 環境変数に定義されている" -"ことを確認します。setuptools パッケージの ``easy_install`` コマンドを使用しま" -"す。" - -msgid "" -"Ensure that the version of qemu you are using is version 0.14 or later. " -"Earlier versions of qemu result in an ``unknown option -s`` error message in " -"the ``nova-compute.log`` file." -msgstr "" -"お使いの qemu バージョンが 0.14 以降であることを確認してください。qemu のバー" -"ジョンがそれ以前の場合、``nova-compute.log`` ファイルで ``unknown option -" -"s`` のエラーメッセージが表示されます。 " - -msgid "Essex" -msgstr "Essex" - -msgid "Eucalyptus Kernel Image (EKI)" -msgstr "Eucalyptus Kernel Image (EKI)" - -msgid "Eucalyptus Machine Image (EMI)" -msgstr "Eucalyptus Machine Image (EMI)" - -msgid "Eucalyptus Ramdisk Image (ERI)" -msgstr "Eucalyptus Ramdisk Image (ERI)" - -msgid "" -"Examine the ``/var/log/nova/nova-api.log`` and ``/var/log/nova/nova-compute." -"log`` log files for error messages." -msgstr "" -"``/var/log/nova/nova-api.log`` と ``/var/log/nova/nova-compute.log`` のログ" -"ファイルでエラーメッセージを確認してください。 " - -msgid "Example: 1 TB \"extra hard drive\"" -msgstr "例: 1 TB \"追加ハードディスク\"" - -msgid "Example: 1 TB of file share" -msgstr "例: 1 TB のファイル共有" - -msgid "Example: 10 GB first disk, 30 GB/core second disk" -msgstr "例: 1 番目の 10 GB ディスク、2 番目のコアあたり 30 GB ディスク" - -msgid "Example: 10s of TBs of data set storage" -msgstr "例: 数十TBのデータセットストレージ" - -msgid "" -"Extension to iptables that allows creation of firewall rules that match " -"entire \"sets\" of IP addresses simultaneously. These sets reside in indexed " -"data structures to increase efficiency, particularly on systems with a large " -"quantity of rules." -msgstr "" -"連続する IP アドレスの全体に一致するファイアウォールルールを作成できる、" -"iptables の拡張。これらのセットは、効率化するためにインデックス化されたデータ" -"構造、とくに大量のルールを持つシステムにあります。" - -msgid "FWaaS" -msgstr "FWaaS" - -msgid "" -"Facility in Compute that allows each virtual machine instance to have more " -"than one VIF connected to it." -msgstr "" -"各仮想マシンインスタンスが複数の仮想インターフェースに接続できるようになる、" -"Compute における機能。" - -msgid "" -"Facility in Compute that enables a virtual machine instance to have more " -"than one VIF connected to it." -msgstr "" -"各仮想マシンインスタンスが複数の仮想インターフェースに接続できるようになる、" -"Compute における機能。" - -msgid "FakeLDAP" -msgstr "FakeLDAP" - -msgid "" -"Fast provisioning of Hadoop clusters on OpenStack for development and QA." -msgstr "開発や QA 目的での Hadoop クラスターの OpenStack 上への迅速な構築" - -msgid "" -"Feature in modern Ethernet networks that supports frames up to approximately " -"9000 bytes." -msgstr "約 9000 バイトまでのフレームをサポートする最近のイーサネット上の機能。" - -msgid "" -"Feature of certain network interface drivers that combines many smaller " -"received packets into a large packet before delivery to the kernel IP stack." 
-msgstr "" -"カーネルの IP スタックに届ける前に、多くの小さな受信パケットを大きなパケット" -"に結合する、特定のネットワークインターフェースドライバーの機能。" - -msgid "Fedora" -msgstr "Fedora" - -msgid "Feedback" -msgstr "フィードバック" - -msgid "Fibre Channel" -msgstr "ファイバーチャネル" - -msgid "Fibre Channel over Ethernet (FCoE)" -msgstr "Fibre Channel over Ethernet (FCoE)" - -msgid "File Storage (manila)" -msgstr "File Storage (manila)" - -msgid "" -"File system option that enables storage of additional information beyond " -"owner, group, permissions, modification time, and so on. The underlying " -"Object Storage file system must support extended attributes." -msgstr "" -"所有者、グループ、パーミッション、変更時間など以外の追加情報を保存できるよう" -"にする、ファイルシステムのオプション。Object Storage のバックエンドのファイル" -"システムは、拡張属性をサポートする必要がある。" - -msgid "" -"Filtering tool for a Linux bridging firewall, enabling filtering of network " -"traffic passing through a Linux bridge. Used in Compute along with " -"arptables, iptables, and ip6tables to ensure isolation of network " -"communications." -msgstr "" -"Linux ブリッジのファイアウォール用のフィルタリングツール。Linux ブリッジを通" -"過するネットワーク通信のフィルタリングできる。ネットワーク通信を分離するため" -"に、OpenStack Compute において arptables、iptables、ip6tables と一緒に使用さ" -"れる。" - -msgid "Find the matching transfer ID:" -msgstr "合致する転送 ID を探します。" - -msgid "First, add the Open Build Service repository:" -msgstr "まず、Open Build Service のリポジトリーを追加します。" - -msgid "Flat Manager" -msgstr "Flat マネージャー" - -msgid "FlatDHCP Manager" -msgstr "FlatDHCP マネージャー" - -msgid "Folsom" -msgstr "Folsom" - -msgid "" -"For IaaS, ability for a regular (non-privileged) account to manage a virtual " -"infrastructure component such as networks without involving an administrator." -msgstr "" -"IaaS の場合、管理者が介することなく、通常の (特権を持たない) ユーザーがネット" -"ワークなどの仮想インフラのコンポーネントを管理する機能。" - -msgid "For Mac OS X or Linux:" -msgstr "Mac OS X または Linux の場合:" - -msgid "For Microsoft Windows:" -msgstr "Microsoft Windows の場合:" - -msgid "" -"For SUSE Linux Enterprise Server, use ``zypper`` to install the clients from " -"the distribution packages in the Open Build Service. First, add the Open " -"Build Service repository:" -msgstr "" -"SUSE Linux Enterprise Server の場合、``zypper`` を使用して、Open Build " -"Service にあるディストリビューションのパッケージサービスからクライアントをイ" -"ンストールします。まず、Open Build Service のリポジトリーを追加します。" - -msgid "" -"For Ubuntu or Debian, use ``apt-get`` to install the clients from the " -"packaged versions:" -msgstr "" -"Ubuntu と Debian の場合、``apt-get`` を使用してパッケージバージョンからクライ" -"アントをインストールします。" - -msgid "" -"For bulk import of data to the cloud, the data ingress system creates a new " -"Block Storage volume, copies data from the physical device, and transfers " -"device ownership to the end user." -msgstr "" -"データをまとめてクラウドにインポートために、データ受信システムが新たにブロッ" -"クストレージボリュームを作成し、物理デバイスからデータをコピーしてから、デバ" -"イスの所有権をエンドユーザーに転送する。" - -msgid "" -"For details about image creation, see the `Virtual Machine Image Guide " -"`__." -msgstr "" -"イメージ作成の詳細は `仮想マシンイメージガイド `__ を参照してください。" - -msgid "" -"For example, copy the file to the computer from which you want to upload an " -"image with a ``glance`` client command." 
-msgstr "" -"例えば、``glance`` クライアントコマンドを用いてイメージをアップロードしたいコ" -"ンピューターにファイルをコピーします。" - -msgid "" -"For example, to see the version number for the ``openstack`` client, run the " -"following command:" -msgstr "" -"例えば、``openstack`` クライアントのバージョン番号を表示する場合、以下のコマ" -"ンドを実行します。" - -msgid "" -"For example, using the image ID shown above, you would issue the command as " -"follows:" -msgstr "" -"例えば、上に示したイメージ ID を使用して、以下のとおりコマンドを発行します。" - -msgid "" -"For example, when you specify your password using the command-line client " -"with the :option:`--os-password` argument, anyone with access to your " -"computer can view it in plain text with the ``ps`` field." -msgstr "" -"例えば、コマンドラインクライアントの :option:`--os-password` 引数を使用してパ" -"スワードを指定するとき、コンピューターにアクセスできるユーザーは、``ps`` の項" -"目に平文で表示できます。" - -msgid "For example:" -msgstr "例:" - -msgid "" -"For more information, see `Configuration Reference Guide `__." -msgstr "" -"詳しい情報は `Configuration Reference Guide `__ を参照してください。" - -msgid "" -"For more sophisticated monitoring, see the `ceilometer `__ project. You can also use tools, such as `Ganglia `__ or `Graphite `__, to gather " -"more detailed data." -msgstr "" -"高機能のモニタリングについては、`ceilometer `__ プロジェクトを参照してください。 `Ganglia `__ や `Graphite `__ などのツールを使用し" -"て、より詳細なデータを収集することも可能です。" - -msgid "" -"For openSUSE, use ``zypper`` to install the clients from the distribution " -"packages service:" -msgstr "" -"openSUSE の場合、``zypper`` を使用して、ディストリビューションのパッケージ" -"サービスからクライアントをインストールします。" - -msgid "" -"For the available OpenStack documentation, see `docs.openstack.org `__." -msgstr "" -"利用可能な OpenStack ドキュメントは `docs.openstack.org `__ を参照してください。" - -msgid "FormPost" -msgstr "FormPost" - -msgid "Free up space in a thinly-provisioned back end." -msgstr "シンプロビジョニングされたバックエンドで空き容量を解放する" - -msgid "" -"From a network architecture point of view, this service must be accessible " -"to customers and the public API for each OpenStack service. To use the " -"administrator functionality for other services, it must also connect to " -"Admin API endpoints, which should not be accessible by customers." -msgstr "" -"ネットワークアーキテクチャーの観点で見ると、このサービスは顧客からアクセスす" -"ることができる必要があると同時に、各 OpenStack サービスのパブリック API にも" -"アクセスできる必要があります。他の OpenStack サービスの管理機能を利用するに" -"は、管理 API エンドポイントにもアクセスできる必要があります。管理 API エンド" -"ポイントは顧客からはアクセスできないようにすべきです。" - -msgid "" -"Gathers statistics, lists items, updates metadata, and uploads, downloads, " -"and deletes files stored by the Object Storage service. Gains access to an " -"Object Storage installation for ad hoc processing." -msgstr "" -"統計情報を収集し、項目を一覧表示し、メタデータを更新し、Object Storage サービ" -"スにより保存されたファイルをアップロード、ダウンロード、削除します。" - -msgid "" -"Generally, extra properties on an Image service image to which only cloud " -"administrators have access. Limits which user roles can perform CRUD " -"operations on that property. The cloud administrator can configure any image " -"property as protected." -msgstr "" -"クラウド管理者のみがアクセスできる、Image service のイメージの追加プロパ" -"ティー。どのユーザーロールがそのプロパティーにおいて CRUD 操作を実行できるか" -"を制限する。クラウド管理者は、保護されたイメージのプロパティーをすべて設定で" -"きる。" - -msgid "Get CPU, memory, I/O, and network statistics for an instance." 
-msgstr "インスタンスの CPU、メモリー、I/O、ネットワーク統計を取得します。" - -msgid "" -"Get a summary of resource usage of all of the instances running on the host:" -msgstr "" -"ホストで実行中のインスタンスすべてのリソース使用状況 (概要) を取得します。" - -msgid "Get diagnostic statistics:" -msgstr "診断統計を取得します。" - -msgid "Get started with OpenStack" -msgstr "OpenStack 入門" - -msgid "Get summary statistics for each tenant:" -msgstr "各テナントの統計サマリーを取得します。" - -msgid "Give a clear, concise summary." -msgstr "明瞭で簡潔なまとめを。" - -msgid "" -"Gives guest VMs exclusive access to a PCI device. Currently supported in " -"OpenStack Havana and later releases." -msgstr "" -"ゲスト仮想マシンが PCI デバイスに排他的にアクセスされる。OpenStack Havana 以" -"降でサポートされる。" - -msgid "Glossary" -msgstr "用語集" - -msgid "GlusterFS" -msgstr "GlusterFS" - -msgid "Governance service" -msgstr "Governance service" - -msgid "Graphic Interchange Format (GIF)" -msgstr "Graphic Interchange Format (GIF)" - -msgid "Graphics Processing Unit (GPU)" -msgstr "Graphics Processing Unit (GPU)" - -msgid "Green Threads" -msgstr "Green Threads" - -msgid "Grizzly" -msgstr "Grizzly" - -msgid "Group" -msgstr "グループ" - -msgid "HTML title" -msgstr "HTML タイトル" - -msgid "Hadoop" -msgstr "Hadoop" - -msgid "Hadoop Distributed File System (HDFS)" -msgstr "Hadoop Distributed File System (HDFS)" - -msgid "Handles authentication and is usually OpenStack Identity." -msgstr "認証を処理します。通常は認証に OpenStack Identity を使用します。" - -msgid "Hash of image data to use for verification." -msgstr "イメージデータの検証に使用するハッシュ。" - -msgid "Havana" -msgstr "Havana" - -msgid "Heat Orchestration Template (HOT)" -msgstr "Heat Orchestration Template (HOT)" - -msgid "Heat input in the format native to OpenStack." -msgstr "OpenStack 固有形式の Heat の入力データ。" - -msgid "Help URL" -msgstr "ヘルプ URL" - -msgid "Helpful information that prevents the user from making mistakes." -msgstr "ユーザーが間違いやすい箇所についてアドバイスです。" - -msgid "" -"High-availability mode for legacy (nova) networking. Each compute node " -"handles NAT and DHCP and acts as a gateway for all of the VMs on it. A " -"networking failure on one compute node doesn't affect VMs on other compute " -"nodes." -msgstr "" -"レガシーネットワーク (nova) の高可用性モード。各コンピュートノードは、NAT と " -"DHCP を処理し、すべての仮想マシンのゲートウェイとして動作する。あるコンピュー" -"トノードにおけるネットワーク障害は、他のコンピュートノードにある仮想マシンに" -"影響しません。" - -msgid "" -"High-performance 64-bit file system created by Silicon Graphics. Excels in " -"parallel I/O operations and data consistency." -msgstr "" -"Silicon Graphics 社により作成された、高性能な 64 ビットファイルシステム。並" -"列 I/O 処理とデータ一貫性に優れる。" - -msgid "Host Bus Adapter (HBA)" -msgstr "Host Bus Adapter (HBA)" - -msgid "Hyper-V" -msgstr "Hyper-V" - -msgid "Hypertext Transfer Protocol (HTTP)" -msgstr "Hypertext Transfer Protocol (HTTP)" - -msgid "Hypertext Transfer Protocol Secure (HTTPS)" -msgstr "Hypertext Transfer Protocol Secure (HTTPS)" - -msgid "ICMP" -msgstr "ICMP" - -msgid "ID number" -msgstr "ID 番号" - -msgid "IDS" -msgstr "IDS" - -msgid "INI" -msgstr "INI" - -msgid "IOPS" -msgstr "IOPS" - -msgid "" -"IOPS (Input/Output Operations Per Second) are a common performance " -"measurement used to benchmark computer storage devices like hard disk " -"drives, solid state drives, and storage area networks." 
-msgstr "" -"IOPS (Input/Output Operations Per Second) は、ハードディスク、SSD、SAN などの" -"ストレージデバイスをベンチマークするために使用される、一般的なパフォーマンス" -"指標である。" - -msgid "IP Address Management (IPAM)" -msgstr "IP Address Management (IPAM)" - -msgid "IP address" -msgstr "IP アドレス" - -msgid "IPL" -msgstr "IPL" - -msgid "IPMI" -msgstr "IPMI" - -msgid "IQN" -msgstr "IQN" - -msgid "ISO9660" -msgstr "ISO9660" - -msgid "IaaS" -msgstr "IaaS" - -msgid "Icehouse" -msgstr "Icehouse" - -msgid "Identity API" -msgstr "Identity API" - -msgid "Identity back end" -msgstr "Identity バックエンド" - -msgid "Identity service" -msgstr "Identity サービス" - -msgid "Identity service API" -msgstr "Identity service API" - -msgid "" -"If Object Storage finds objects, containers, or accounts that are corrupt, " -"they are placed in this state, are not replicated, cannot be read by " -"clients, and a correct copy is re-replicated." -msgstr "" -"Object Storage が壊れたオブジェクト、コンテナー、アカウントを見つけた際に、そ" -"のデータはこの状態にセットされる。この状態にセットされたデータは、複製され" -"ず、クライアントが読み出すこともできなくなり、正しいコピーが再複製される。" - -msgid "" -"If a requested resource such as CPU time, disk storage, or memory is not " -"available in the parent cell, the request is forwarded to its associated " -"child cells. If the child cell can fulfill the request, it does. Otherwise, " -"it attempts to pass the request to any of its children." -msgstr "" -"CPU 時間、ディスクストレージ、メモリ等の要求されたリソースが親セルで利用不可" -"の場合、リクエストは親セルに紐付けられた子セルに転送される。子セルがリクエス" -"トに対応可能な場合、子セルはそのリクエストを処理する。対応不可の場合、そのリ" -"クエストを自分の子セルに渡そうとする。" - -msgid "" -"If a requested resource, such as CPU time, disk storage, or memory, is not " -"available in the parent cell, the request is forwarded to associated child " -"cells." -msgstr "" -"要求されたリソース(CPU時間、ディスクストレージ、メモリ)が親セルで利用不可の" -"場合、そのリクエストは紐付けられた子セルに転送される。" - -msgid "" -"If the volume is in use or has snapshots, the specified host destination " -"cannot accept the volume. If the user is not an administrator, the migration " -"fails." -msgstr "" -"ボリュームが使用中の場合、あるいはスナップショットが存在する場合、指定のマイ" -"グレーション先のホストは、そのボリュームを受け入れることができません。また、" -"管理ユーザーではない場合も、マイグレーションは失敗します。" - -msgid "" -"If you do not have a sufficient quota for the transfer, the transfer is " -"refused." -msgstr "転送用のクォータが不足している場合、転送は拒否されます。" - -msgid "" -"If you do not want to use this theme you can remove it and its dependencies " -"using the following command:" -msgstr "" -"このテーマを使用したくない場合、以下のコマンドを使用して、このテーマと依存" -"パッケージを削除できます。" - -msgid "" -"If you encounter problems in creating an image in the Image service or " -"Compute, the following information may help you troubleshoot the creation " -"process." -msgstr "" -"Image service や Compute でイメージの作成に関する問題に遭遇した場合、以下の情" -"報が作成プロセスのトラブルシュートに役立つかもしれません。" - -msgid "" -"If you need to install the source package for the command-line package, the " -"following table lists the software needed to run the command-line clients, " -"and provides installation instructions as needed." -msgstr "" -"コマンドラインパッケージのソースパッケージをインストールする必要がある場合、" -"コマンドラインクライアントを実行するために必要となるソフトウェアの一覧が以下" -"の表にまとめられています。必要に応じてインストール手順が書かれています。" - -msgid "" -"If you specify a disk or CD-ROM bus model that is not supported, see the " -"Disk_and_CD-ROM_bus_model_values_table_. If you specify a VIF model that is " -"not supported, the instance fails to launch. See the VIF_model_values_table_." 
-msgstr "" -"サポートされていないディスクまたは CD-ROM バスモデルを指定する場合は " -"Disk_and_CD-ROM_bus_model_values_table_ (ディスクと CD-ROM のバスモデルの値) " -"を参照してください。また、サポートされていない VIF モデルを指定する場合、イン" -"スタンスは起動できません。 VIF_model_values_table_ (仮想インターフェースのモ" -"デルの値) を参照してください。 " - -msgid "" -"If your volume was created successfully, its status is ``available``. If its " -"status is ``error``, you might have exceeded your quota." -msgstr "" -"ボリュームが正常に作成された場合、状態は ``available`` にとなります。状態が " -"``error`` の場合、クォータを超過している可能性があります。" - -msgid "Image API" -msgstr "Image API" - -msgid "Image service" -msgstr "Image service" - -msgid "Image service API" -msgstr "Image service API" - -msgid "" -"Impassable limits for guest VMs. Settings include total RAM size, maximum " -"number of vCPUs, and maximum disk size." -msgstr "" -"ゲスト仮想マシンの超えられない制限。合計メモリー容量、最大仮想 CPU 数、最大" -"ディスク容量の設定。" - -msgid "Implemented as a filesystem underlying OpenStack Compute" -msgstr "OpenStack Compute が動作するファイルシステムとして実装" - -msgid "" -"In Compute and Block Storage, the ability to set resource limits on a per-" -"project basis." -msgstr "" -"プロジェクト単位に使用できるリソース上限を設定できる、Compute と Block " -"Storage の機能。" - -msgid "" -"In Compute, conductor is the process that proxies database requests from the " -"compute process. Using conductor improves security because compute nodes do " -"not need direct access to the database." -msgstr "" -"Compute において、コンピュートプロセスからのデータベース要求をプロキシーする" -"処理。コンダクターを使用することにより、コンピュートノードがデータベースに直" -"接アクセスする必要がなくなるので、セキュリティーを向上できる。" - -msgid "" -"In Object Storage, tools to test and ensure dispersion of objects and " -"containers to ensure fault tolerance." -msgstr "" -"Object Storage で、フォールトトレラントの確認の為に、オブジェクトとコンテナの" -"分散をテスト、確認するツール。" - -msgid "" -"In OpenStack Identity, entities represent individual API consumers and are " -"owned by a specific domain. In OpenStack Compute, a user can be associated " -"with roles, projects, or both." -msgstr "" -"OpenStack Identity では、エンティティーは個々の API 利用者を表す、特定のドメ" -"インに属する。OpenStack Compute では、ユーザーはロール、プロジェクトもしくは" -"その両者と関連付けることができる。" - -msgid "" -"In OpenStack, the API version for a project is part of the URL. For example, " -"``example.com/nova/v1/foobar``." -msgstr "" -"OpenStack では、プロジェクトの API バージョンが URL の一部となる。例: " -"``example.com/nova/v1/foobar``" - -msgid "" -"In a high-availability setup with an active/active configuration, several " -"systems share the load together and if one fails, the load is distributed to " -"the remaining systems." -msgstr "" -"アクティブ/アクティブ設定を用いた高可用構成の場合、複数のシステムが処理を一緒" -"に分担する。また、あるシステムが故障した場合、処理が残りのシステムに分散され" -"る。" - -msgid "" -"In a high-availability setup with an active/passive configuration, systems " -"are set up to bring additional resources online to replace those that have " -"failed." -msgstr "" -"アクティブ/パッシブ設定を用いた高可用性セットアップでは、故障したシステムを置" -"き換えるために、システムが追加リソースをオンラインにするようセットアップされ" -"る。" - -msgid "" -"In a text editor, create a file named ``PROJECT-openrc.sh`` and add the " -"following authentication information:" -msgstr "" -"テキストエディターで ``PROJECT-openrc.sh`` という名前のファイルを作成し、以下" -"の認証情報を追加します。" - -msgid "" -"In order to benefit from the Identity service, other OpenStack services need " -"to collaborate with it. When an OpenStack service receives a request from a " -"user, it checks with the Identity service whether the user is authorized to " -"make the request." 
-msgstr "" -"Identity サービスの恩恵を受けるには、他の OpenStack サービスは Identity サー" -"ビスと協調する必要があります。OpenStack サービスは、ユーザーからのリクエスト" -"を受けたとき、ユーザーがリクエストを発行する権限があるかどうかを Identity " -"サービスを用いて確認します。" - -msgid "" -"In the context of Object Storage, this is a process that is not terminated " -"after an upgrade, restart, or reload of the service." -msgstr "" -"Object Storage の文脈において、サービスの更新、再起動、再読み込みの後に終了し" -"ないプロセス。" - -msgid "" -"In the context of the Identity service, the worker process that provides " -"access to the admin API." -msgstr "Identity の領域で、管理 API へのアクセスを提供するワーカープロセス。" - -msgid "" -"In the following example, the ``demo-openrc.sh`` file is sourced for the " -"demo project:" -msgstr "" -"以下の例では、``demo-openrc.sh`` が demo プロジェクト用に読み込まれます。" - -msgid "" -"In this example, :option:`--force-host-copy True` forces the generic host-" -"based migration mechanism and bypasses any driver optimizations." -msgstr "" -"この例では、 :option:`--force-host-copy True` は、強制的に一般的なホストベー" -"スのマイグレーションの仕組みを使用してドライバーの最適化なしで進めます。" - -msgid "" -"Information that consists solely of ones and zeroes, which is the language " -"of computers." -msgstr "1 と 0 だけから構成される情報。コンピューターの言語。" - -msgid "" -"Infrastructure-as-a-Service. IaaS is a provisioning model in which an " -"organization outsources physical components of a data center, such as " -"storage, hardware, servers, and networking components. A service provider " -"owns the equipment and is responsible for housing, operating and maintaining " -"it. The client typically pays on a per-use basis. IaaS is a model for " -"providing cloud services." -msgstr "" -"Infrastructure-as-a-Service。IaaS は、ストレージ、ハードウェア、サーバー、" -"ネットワークなど、データセンターの物理コンポーネントをアウトソースする組織の" -"配備モデル。サーバープロバイダーは、設備を所有し、ハウジング、運用、メンテナ" -"ンスに責任を持つ。クライアントは、一般的に使用量に応じて費用を払う。IaaS は、" -"クラウドサービスを提供するモデル。" - -msgid "Initial Program Loader." -msgstr "Initial Program Loader。初期プログラムローダー。" - -msgid "Install each client separately by using the following command:" -msgstr "以下のコマンドを使用して、各クライアントをそれぞれインストールします。" - -msgid "Install pip through the package manager for your system:" -msgstr "" -"お使いのシステムのパッケージマネージャーから pip をインストールします。" - -msgid "Install the Database service." -msgstr "Database サービスをインストールします。" - -msgid "Install the OpenStack client" -msgstr "OpenStack client のインストール" - -msgid "Install the OpenStack command-line clients" -msgstr "OpenStack コマンドラインクライアントのインストール" - -msgid "Install the prerequisite software" -msgstr "前提ソフトウェアのインストール" - -msgid "" -"Install the prerequisite software and the Python package for each OpenStack " -"client." -msgstr "" -"前提ソフトウェアと各 OpenStack クライアント用の Python パッケージをインストー" -"ルします。" - -msgid "Installed by default on Mac OS X." -msgstr "Mac OS X に標準でインストールされます。" - -msgid "Installing from packages" -msgstr "パッケージからのインストール" - -msgid "Installing with pip" -msgstr "pip を用いたインストール" - -msgid "Instances in use" -msgstr "使用中のインスタンス" - -msgid "" -"Instruments the complex system flows that support provisioning instances, " -"managing the lifecycle of instances, and performing operations on instances." -msgstr "" -"インスタンスの展開、インスタンスのライフサイクルの管理、インスタンス上の処理" -"の実行をサポートする複雑なシステムフローの全体指揮を行います。" - -msgid "" -"Integration with vendor specific management tools, such as Apache Ambari or " -"Cloudera Management Console." -msgstr "" -"ベンダー固有の管理ツールとの統合。 Apache Ambari や Cloudera Management " -"Console など。" - -msgid "" -"Intelligent Platform Management Interface. IPMI is a standardized computer " -"system interface used by system administrators for out-of-band management of " -"computer systems and monitoring of their operation. 
In layman's terms, it is "
-"a way to manage a computer using a direct network connection, whether it is "
-"turned on or not; connecting to the hardware rather than an operating system "
-"or login shell."
-msgstr ""
-"Intelligent Platform Management Interface。IPMI は、コンピューターシステムの"
-"アウトオブバンド管理、運用監視のために、システム管理者により使用される標準的"
-"なコンピューターシステムインターフェース。平たく言うと、電源状態によらず、"
-"ネットワークの直接通信を使用してコンピューターを管理する方法。オペレーティン"
-"グシステムやログインシェルではなく、ハードウェアに接続する。"
-
-msgid ""
-"Interactions and processes that are obfuscated from the user, such as "
-"Compute volume mount, data transmission to an iSCSI target by a daemon, or "
-"Object Storage object integrity checks."
-msgstr ""
-"Compute のボリュームのマウント、デーモンによる iSCSI ターゲットへのデータ転"
-"送、Object Storage のオブジェクトの完全性検査など、ユーザーから見えにくい操作"
-"や処理。"
-
-msgid ""
-"Interacts directly with the Block Storage service, and processes such as the "
-"``cinder-scheduler``. It also interacts with these processes through a "
-"message queue. The ``cinder-volume`` service responds to read and write "
-"requests sent to the Block Storage service to maintain state. It can "
-"interact with a variety of storage providers through a driver architecture."
-msgstr ""
-"``cinder-scheduler`` などの Block Storage サービスやプロセスと直接やり取りを"
-"行います。また、メッセージキュー経由でもこれらのプロセスと連携します。 "
-"``cinder-volume`` サービスは、Block Storage サービスへ送られた読み出し、書き"
-"込みリクエストに応答し、状態を管理します。ドライバーアーキテクチャーになって"
-"おり、様々なストレージプロバイダーと連携できます。"
-
-msgid ""
-"Interface within Networking that enables organizations to create custom plug-"
-"ins for advanced features, such as QoS, ACLs, or IDS."
-msgstr ""
-"組織が QoS、ACL、IDS などの高度な機能向けのカスタムプラグインを作成できるよう"
-"にする、Networking 内のインターフェース。"
-
-msgid ""
-"Internally, OpenStack services are composed of several processes. All "
-"services have at least one API process, which listens for API requests, "
-"preprocesses them and passes them on to other parts of the service. With the "
-"exception of the Identity service, the actual work is done by distinct "
-"processes."
-msgstr ""
-"内部では、それぞれの OpenStack サービスは複数のプロセスから構成されていま"
-"す。すべてのサービスには少なくとも 1 つの API プロセスがあり、この API プロセ"
-"スは API リクエストを待ち受け、API リクエストの前処理を行ってから、そのサービ"
-"スの別の構成要素にそのリクエストを渡します。実際の処理は別のプロセスによって"
-"行われます。ただし、Identity service は例外です。"
-
-msgid ""
-"Internally, each command uses cURL command-line tools, which embed API "
-"requests. OpenStack APIs are RESTful APIs, and use the HTTP protocol. They "
-"include methods, URIs, media types, and response codes."
-msgstr ""
-"内部的に、各コマンドは API リクエストを組み込んだ cURL コマンドラインツールを"
-"使用します。OpenStack API は HTTP プロトコルを使用する RESTful API です。メ"
-"ソッド、URI、メディアタイプ、応答コードなどが含まれます。"
-
-msgid ""
-"Internet Control Message Protocol, used by network devices for control "
-"messages. For example, :command:`ping` uses ICMP to test connectivity."
-msgstr ""
-"インターネット制御メッセージプロトコル。制御メッセージ用にネットワークデバイ"
-"スにより使用される。例えば、:command:`ping` は接続性をテストするために ICMP "
-"を使用する。"
-
-msgid "Internet Service Provider (ISP)"
-msgstr "Internet Service Provider (ISP)"
-
-msgid "Internet Small Computer System Interface (iSCSI)"
-msgstr "Internet Small Computer System Interface (iSCSI)"
-
-msgid "Internet protocol (IP)"
-msgstr "インターネットプロトコル (IP)"
-
-msgid "Intrusion Detection System."
-msgstr "侵入検知システム。" - -msgid "It includes the following components:" -msgstr "以下のコンポーネントで構成されます。" - -msgid "Java" -msgstr "Java" - -msgid "JavaScript" -msgstr "JavaScript" - -msgid "JavaScript Object Notation (JSON)" -msgstr "JavaScript Object Notation (JSON)" - -msgid "Jenkins" -msgstr "Jenkins" - -msgid "Juno" -msgstr "Juno" - -msgid "Kerberos" -msgstr "Kerberos" - -msgid "Key Manager service" -msgstr "Key Manager サービス" - -msgid "Key features are:" -msgstr "主要機能な以下の通りです。" - -msgid "Kickstart" -msgstr "Kickstart" - -msgid "Kilo" -msgstr "Kilo" - -msgid "LBaaS" -msgstr "LBaaS" - -msgid "" -"LBaaS feature that provides availability monitoring using the ``ping`` " -"command, TCP, and HTTP/HTTPS GET." -msgstr "" -"``ping`` コマンド、TCP、HTTP/HTTPS GET を使用してモニタリングする機能を提供す" -"る LBaaS の機能。" - -msgid "" -"Launches stacks from templates, views details of running stacks including " -"events and resources, and updates and deletes stacks." -msgstr "" -"テンプレートからスタックを起動し、イベントやリソースを含む実行中のスタックの" -"詳細を表示し、スタックを更新、削除します。" - -msgid "Launchpad" -msgstr "Launchpad" - -msgid "Layer-2 (L2) agent" -msgstr "L2 エージェント" - -msgid "Layer-2 network" -msgstr "L2 ネットワーク" - -msgid "Layer-3 (L3) agent" -msgstr "L3 エージェント" - -msgid "Layer-3 network" -msgstr "L3 ネットワーク" - -msgid "Liberty" -msgstr "Liberty" - -msgid "Lightweight Directory Access Protocol (LDAP)" -msgstr "Lightweight Directory Access Protocol (LDAP)" - -msgid "Linux Bridge neutron plug-in" -msgstr "Linux Bridge neutron プラグイン" - -msgid "Linux bridge" -msgstr "Linux ブリッジ" - -msgid "Linux containers (LXC)" -msgstr "Linux コンテナー (LXC)" - -msgid "" -"Linux kernel feature that provides independent virtual networking instances " -"on a single host with separate routing tables and interfaces. Similar to " -"virtual routing and forwarding (VRF) services on physical network equipment." -msgstr "" -"別々のルーティングテーブルとインターフェースを持つ単一のホストにおいて、独立" -"した仮想ネットワークインターフェースを提供する Linux カーネル機能。物理ネット" -"ワーク環境における仮想ルーティングおよびフォワーディング (VRF) サービスと似て" -"いる。" - -msgid "" -"Linux kernel security module that provides the mechanism for supporting " -"access control policies." -msgstr "" -"アクセス制御ポリシーをサポートするための機構を提供する Linux カーネルセキュリ" -"ティーモジュール。" - -msgid "List available volumes and their statuses:" -msgstr "利用可能なボリュームとその状態を表示します。" - -msgid "" -"List images, and note the ID of the image that you want to use for your " -"volume:" -msgstr "" -"イメージを一覧表示し、ボリュームに使用したいイメージの ID を記録します。" - -msgid "List instances:" -msgstr "インスタンスを一覧表示します。" - -msgid "List or get details for images (glance)" -msgstr "イメージ (glance) の詳細の一覧表示と取得" - -msgid "" -"List the availability zones, and note the ID of the availability zone in " -"which you want to create your volume:" -msgstr "" -"アベイラビリティゾーンを表示して、その中からボリュームを作成するアベイラビリ" -"ティゾーンの ID をメモします。 " - -msgid "List the hosts and the nova-related services that run on them:" -msgstr "" -"ホストおよびそのホストで実行されている nova 関連のサービスを一覧表示します。" - -msgid "" -"List the volumes again, and note that the status of your volume is " -"``deleting``:" -msgstr "" -" 再度ボリュームを表示して、ボリュームの状態が deleting となっていることを確認" -"します。 " - -msgid "List volumes:" -msgstr "ボリュームを一覧表示します。" - -msgid "Lists allowed commands within the Compute rootwrap facility." -msgstr "Compute rootwrap 機能内で許可されるコマンドの一覧。" - -msgid "" -"Lists containers in Object Storage and stores container information in the " -"account database." -msgstr "" -"Object Storage にあるコンテナーを一覧表示し、コンテナーの情報をアカウントデー" -"タベースに保存する。" - -msgid "" -"Local file that contains the disk image to be uploaded during the update. 
" -"Alternatively, you can pass images to the client through stdin." -msgstr "" -"更新中にアップロードするディスクイメージを含むローカルファイル。または、標準" -"入力からクライアントにイメージを渡すことができます。" - -msgid "Logged in banner: 216 x 35" -msgstr "ログイン後のバナー: 216 x 35" - -msgid "Logical Volume Manager (LVM)" -msgstr "論理ボリュームマネージャー (LVM)" - -msgid "Logical architecture" -msgstr "論理アーキテクチャー" - -msgid "" -"Logical groupings of related code, such as the Block Storage volume manager " -"or network manager." -msgstr "" -"Block Storage のボリュームマネージャーやネットワークマネージャーなど、関連す" -"るコードの論理的なグループ。" - -msgid "Logical subdivision of an IP network." -msgstr "IP ネットワークの論理分割。" - -msgid "Login screen: 365 x 50" -msgstr "ログイン画面: 365 x 50" - -msgid "Logo" -msgstr "ロゴ" - -msgid "Logo and site colors" -msgstr "ロゴとサイトカラー" - -msgid "Logo link" -msgstr "ロゴリンク" - -msgid "" -"Lower power consumption CPU often found in mobile and embedded devices. " -"Supported by OpenStack." -msgstr "" -"モバイル機器や組み込みデバイスによく利用される低消費電力 CPU。OpenStack はサ" -"ポートしている。" - -msgid "" -"MD5 hash of an object within Object Storage, used to ensure data integrity." -msgstr "" -"Object Storage 内のオブジェクトの MD5 ハッシュ。データの完全性を確認するため" -"に使用される。" - -msgid "Makes an image accessible for all the tenants (admin-only by default)." -msgstr "" -"イメージをすべてのテナントからアクセス可能にします (デフォルトは admin 専" -"用)。" - -msgid "Manage images" -msgstr "イメージの管理" - -msgid "Manage volumes" -msgstr "ボリュームの管理" - -msgid "" -"Managed through REST API with UI available as part of OpenStack dashboard." -msgstr "" -"REST API 経由で管理でき、 OpenStack dashboard の一部として UI も提供されてい" -"ます。" - -msgid "Manages accounts defined with Object Storage." -msgstr "Object Storage で定義されるアカウントを管理します。" - -msgid "Manages actual objects, such as files, on the storage nodes." -msgstr "" -"ストレージノードにおいて、ファイルなどの実際のオブジェクトを管理します。" - -msgid "" -"Manages the lifecycle of compute instances in an OpenStack environment. " -"Responsibilities include spawning, scheduling and decommissioning of virtual " -"machines on demand." -msgstr "" -"OpenStack 環境でコンピュートインスタンスのライフサイクルを管理します。要求に" -"応じて仮想マシンの作成、スケジューリング、破棄などに責任を持ちます。" - -msgid "Manages the mapping of containers or folders, within Object Storage." -msgstr "Object Storage 内でコンテナーやフォルダーの対応付けを管理します。" - -msgid "" -"Many Linux distributions provide packages to make setuptools easy to " -"install. Search your package manager for setuptools to find an installation " -"package. If you cannot find one, download the setuptools package directly " -"from https://pypi.python.org/pypi/setuptools." -msgstr "" -"Linux ディストリビューションの多くは、setuptools を簡単にインストールするため" -"のパッケージを提供しています。お使いのパッケージマネージャーで setuptools を" -"検索してインストレーションパッケージを見つけてください。見つからない場合は、" -"https://pypi.python.org/pypi/setuptools から直接 setuptools パッケージをダウ" -"ンロードしてください。 " - -msgid "Maps Object Storage partitions to physical storage devices." -msgstr "Object Storage パーティションの物理ストレージデバイスへの対応付け" - -msgid "" -"Massively scalable distributed storage system that consists of an object " -"store, block store, and POSIX-compatible distributed file system. Compatible " -"with OpenStack." -msgstr "" -"オブジェクトストア、ブロックストア、および POSIX 互換分散ファイルシステムから" -"構成される大規模スケール可能分散ストレージシステム。OpenStack 互換。" - -msgid "" -"Maximum frame or packet size for a particular network medium. Typically 1500 " -"bytes for Ethernet networks." -msgstr "" -"特定のネットワークメディア向けの最大フレームやパケットサイズ。一般的に、イー" -"サネット向けは 1500 バイト。" - -msgid "" -"Mechanism for highly-available multi-host routing when using OpenStack " -"Networking (neutron)." 
-msgstr "" -"OpenStack Networking (neutron) の使用時、高可用なマルチホストルーティングのた" -"めの機構。" - -msgid "" -"Mechanism in IP networks to detect end-to-end MTU and adjust packet size " -"accordingly." -msgstr "" -"エンド間の MTU を検出し、パケットサイズを適切に調整するための IP ネットワーク" -"における機構。" - -msgid "" -"Message exchange that is cleared when the service restarts. Its data is not " -"written to persistent storage." -msgstr "" -"サービスの再起動時に削除されるメッセージ交換。このデータは永続ストレージに書" -"き込まれない。" - -msgid "" -"Message queue software supported by OpenStack. An alternative to RabbitMQ. " -"Also spelled 0MQ." -msgstr "" -"OpenStack によりサポートされるメッセージキューソフトウェア。RabbitMQ の代替。" -"0MQ とも表記。" - -msgid "" -"Message queue software supported by OpenStack; an alternative to RabbitMQ." -msgstr "" -"OpenStack によりサポートされるメッセージキューソフトウェア。RabbitMQ の代替。" - -msgid "" -"Message queue that is cleared when the service restarts. Its data is not " -"written to persistent storage." -msgstr "" -"サービスの再起動時に削除されるメッセージキュー。このデータは永続ストレージに" -"書き込まれない。" - -msgid "Message service" -msgstr "Message サービス" - -msgid "Messaging queue" -msgstr "メッセージングキュー" - -msgid "Meta-Data Server (MDS)" -msgstr "Meta-Data Server (MDS)" - -msgid "Metadata agent" -msgstr "メタデータエージェント" - -msgid "Metadata definition service" -msgstr "メタデータ定義サービス" - -msgid "" -"Method to access VM instance consoles using a web browser. Supported by " -"Compute." -msgstr "" -"Web ブラウザーを使用して仮想マシンインスタンスのコンソールにアクセスする方" -"法。Compute によりサポートされる。" - -msgid "" -"Middleware modules run in the address space of the OpenStack component that " -"is using the Identity service. These modules intercept service requests, " -"extract user credentials, and send them to the centralized server for " -"authorization. The integration between the middleware modules and OpenStack " -"components uses the Python Web Server Gateway Interface." -msgstr "" -"ミドルウェアモジュールは、Identity サービスを使用している OpenStack コンポー" -"ネントの一部として動作します。これらのモジュールは、サービスリクエスト処理の" -"中で、ユーザーのクレデンシャルを抽出し、認可を行うためそのクレデンシャルを中" -"央サーバーに送信します。ミドルウェアモジュールと OpenStack コンポーネントの間" -"の統合には、Python Web Server Gateway Interface を使用します。" - -msgid "Migrate a volume" -msgstr "ボリュームの移行" - -msgid "" -"Migrate a volume with the :command:`cinder migrate` command, as shown in the " -"following example:" -msgstr "" -"以下の例にあるように、:command:`cinder migrate` コマンドでボリュームを移行し" -"ます。" - -msgid "Mitaka" -msgstr "Mitaka" - -msgid "Modify the properties of a volume." -msgstr "ボリュームのプロパティーを編集します。" - -msgid "Modular Layer 2 (ML2) neutron plug-in" -msgstr "Modular Layer 2 (ML2) neutron プラグイン" - -msgid "" -"Modular system that allows the underlying message queue software of Compute " -"to be changed. For example, from RabbitMQ to ZeroMQ or Qpid." -msgstr "" -"Compute が利用するメッセージキューソフトウェアを変更できるようにする仕組み。" -"例えば、 RabbitMQ を ZeroMQ や Qpid に変更できる。" - -msgid "Modules" -msgstr "モジュール" - -msgid "Monitor (LBaaS)" -msgstr "モニター (LBaaS)" - -msgid "Monitor (Mon)" -msgstr "モニター (Mon)" - -msgid "Monitoring" -msgstr "Monitoring" - -msgid "Monitoring solution." -msgstr "監視ソリューション。" - -msgid "" -"Monitors and meters the OpenStack cloud for billing, benchmarking, " -"scalability, and statistical purposes." -msgstr "" -"課金、ベンチマーク、スケーラビリティ、統計などの目的のために、OpenStack クラ" -"ウドを監視および計測します。" - -msgid "" -"Most Linux distributions include packaged versions of the command-line " -"clients that you can install directly, see Installing_from_packages_." 
-msgstr "" -"多くの Linux ディストリビューションには、コマンドラインクライアントを直接イン" -"ストールできるパッケージがあります。詳細は Installing_from_packages_ を参照し" -"てください。" - -msgid "" -"Mounted via OpenStack Block Storage controlled protocol (for example, iSCSI)" -msgstr "" -"OpenStack Block Storage が制御するプロトコル (例: iSCSI) 経由でマウントされる" - -msgid "MultiNic" -msgstr "MultiNic" - -msgid "NAT" -msgstr "NAT" - -msgid "NTP" -msgstr "NTP" - -msgid "Name for the Compute component that manages VMs." -msgstr "仮想マシンを管理する Compute のコンポーネントの名称。" - -msgid "Name of the snapshot. Defaults to ``None``." -msgstr "スナップショットの名前。デフォルトは ``None`` です。" - -msgid "Name or ID of the snapshot to unmanage." -msgstr "管理対象外にするスナップショットの名前または ID。" - -msgid "Name, ID, or other identifier for an existing snapshot." -msgstr "既存のスナップショットの名前、ID または他の識別子。" - -msgid "Nebula" -msgstr "Nebula" - -msgid "NetApp volume driver" -msgstr "NetApp ボリュームドライバー" - -msgid "" -"Network Address Translation; Process of modifying IP address information " -"while in transit. Supported by Compute and Networking." -msgstr "" -"ネットワークアドレス変換。IP アドレス情報を転送中に変更する処理。Compute と " -"Networking によりサポートされる。" - -msgid "Network File System (NFS)" -msgstr "Network File System (NFS)" - -msgid "" -"Network Time Protocol; Method of keeping a clock for a host or node correct " -"via communication with a trusted, accurate time source." -msgstr "" -"ネットワーク時刻プロトコル。信頼された、正確な時刻源と通信することにより、ホ" -"ストやノードの時刻を正確に保つ方法。" - -msgid "" -"Network traffic between a user or client (north) and a server (south), or " -"traffic into the cloud (south) and out of the cloud (north). See also east-" -"west traffic." -msgstr "" -"ユーザーやクライアント (ノース)、とサーバー (サウス) 間のネットワーク通信、ク" -"ラウド (サウス) とクラウド外 (ノース) 内の通信。イースト・サウス通信も参照。" - -msgid "" -"Network traffic between servers in the same cloud or data center. See also " -"north-south traffic." -msgstr "" -"同じクラウドやデータセンターにあるサーバー間のネットワーク通信。ノース・サウ" -"ス通信も参照。" - -msgid "Networking API" -msgstr "Networking API" - -msgid "Networking service" -msgstr "Networking サービス" - -msgid "" -"New users are assigned to this tenant if no tenant is specified when a user " -"is created." -msgstr "" -"ユーザーを作成したときに、テナントを指定していない場合、新規ユーザーはこのテ" -"ナントに割り当てられる。" - -msgid "Newton" -msgstr "Newton" - -msgid "Nexenta volume driver" -msgstr "Nexenta ボリュームドライバー" - -msgid "No ACK" -msgstr "No ACK" - -msgid "" -"Note that extra dependencies may be required, per operating system, " -"depending on the package being installed, such as is the case with Tempest." -msgstr "" -"Tempest の場合など、インストールするパッケージに応じて、依存関係によりオペ" -"レーティングシステムごとの追加パッケージが必要になるかもしれないことに注意し" -"てください。" - -msgid "Note that the volume is now available." -msgstr "ボリュームが利用可能になっていることに注意してください。" - -msgid "Note the ID of your volume." -msgstr "ボリュームの ID を記録します。" - -msgid "Notices" -msgstr "注記" - -msgid "Notices take these forms:" -msgstr "注記には以下の種類があります。" - -msgid "Nova API" -msgstr "Nova API" - -msgid "" -"Number that is unique to every computer system on the Internet. Two versions " -"of the Internet Protocol (IP) are in use for addresses: IPv4 and IPv6." -msgstr "" -"インターネットにあるすべてのコンピューターシステムを一意にする番号。Internet " -"Protocol (IP) は、IPv4 と IPv6 の 2 つのバージョンがアドレス付けのために使用" -"中です。" - -msgid "Object Storage (swift)" -msgstr "Object Storage (swift)" - -msgid "Object Storage API" -msgstr "Object Storage API" - -msgid "Object Storage Device (OSD)" -msgstr "Object Storage Device (OSD)" - -msgid "" -"Object Storage middleware that uploads (posts) an image through a form on a " -"web page." 
-msgstr "" -"Web ページのフォームからイメージをアップロード (投稿) する、Object Storage の" -"ミドルウェア。" - -msgid "Object Storage service" -msgstr "Object Storage サービス" - -msgid "Object servers (swift-object-server)" -msgstr "オブジェクトサーバー (swift-object-server)" - -msgid "" -"Object storage service by Amazon; similar in function to Object Storage, it " -"can act as a back-end store for Image service VM images." -msgstr "" -"Amazon により提供されるオブジェクトストレージ。Object Storage の機能に似てい" -"る。Image service の仮想マシンイメージのバックエンドとして動作できる。" - -msgid "Ocata" -msgstr "Ocata" - -msgid "Oldie" -msgstr "Oldie" - -msgid "" -"On Red Hat Enterprise Linux, CentOS, or Fedora, use ``yum`` to install the " -"clients from the packaged versions available in `RDO `__:" -msgstr "" -"Red Hat Enterprise Linux、CentOS、Fedora の場合、``yum`` を使用して、`RDO " -"`__ にあるクライアントのパッケージをインストール" -"します。" - -msgid "" -"On any shell from which you want to run OpenStack commands, source the " -"``PROJECT-openrc.sh`` file for the respective project." -msgstr "" -"OpenStack コマンドを実行したいシェルで、それぞれのプロジェクト用の ``PROJECT-" -"openrc.sh`` ファイルを読み込みます。" - -msgid "" -"On any shell from which you want to run OpenStack commands, source the " -"``PROJECT-openrc.sh`` file for the respective project. In this example, you " -"source the ``admin-openrc.sh`` file for the admin project:" -msgstr "" -"OpenStack コマンドを実行したいシェルで、それぞれのプロジェクト用の ``PROJECT-" -"openrc.sh`` ファイルを読み込みます。この例では、admin プロジェクト用の " -"``admin-openrc.sh`` ファイルを読み込みます。" - -msgid "" -"On the :guilabel:`API Access` tab, click :guilabel:`Download OpenStack RC " -"File` and save the file. The filename will be of the form ``PROJECT-openrc." -"sh`` where ``PROJECT`` is the name of the project for which you downloaded " -"the file." -msgstr "" -":guilabel:`API アクセス` タブで、:guilabel:`OpenStack RC ファイルのダウンロー" -"ド` をクリックしてファイルを保存します。ファイル名は ``PROJECT-openrc.sh`` の" -"形式とし、``PROJECT`` にはファイルをダウンロードするプロジェクト名を入力しま" -"す。 " - -msgid "On-instance / ephemeral" -msgstr "インスタンス上 / 一時" - -msgid "" -"Once you have the dashboard installed you can customize the way it looks and " -"feels to suit your own needs." -msgstr "" -"ダッシュボードをインストールすると、ルックアンドフィールを必要に応じてカスタ" -"マイズできます。" - -msgid "" -"One of the RPC primitives used by the OpenStack message queue software. " -"Sends a message and does not wait for a response." -msgstr "" -"OpenStack メッセージキューソフトウェアにより使用される RPC プリミティブの 1 " -"つ。メッセージを送信し、応答を待たない。" - -msgid "" -"One of the RPC primitives used by the OpenStack message queue software. " -"Sends a message and waits for a response." -msgstr "" -"OpenStack のメッセージキューソフトウェアにより使用される、RPC プリミティブの " -"1 つ。メッセージを送信し、応答を待つ。" - -msgid "One of the VM image disk formats supported by Image service." -msgstr "" -"Image service によりサポートされる、仮想マシンイメージディスク形式の 1 つ。" - -msgid "" -"One of the VM image disk formats supported by Image service; an unstructured " -"disk image." -msgstr "" -"Image service によりサポートされる仮想マシンイメージのディスク形式の 1 つ。" - -msgid "" -"One of the default roles in the Compute RBAC system and the default role " -"assigned to a new user." -msgstr "" -"Compute RBAC システムにあるデフォルトのロールの 1 つ。新規ユーザーに割り当て" -"られるデフォルトのロール。" - -msgid "" -"One of the default roles in the Compute RBAC system. Enables a user to add " -"other users to a project, interact with VM images that are associated with " -"the project, and start and stop VM instances." -msgstr "" -"Compute RBAC システムにおけるデフォルトのロールの 1 つ。ユーザーが他のユー" -"ザーをプロジェクトに追加でき、プロジェクトに関連付けられた仮想マシンイメージ" -"を操作でき、仮想マシンインスタンスを起動および終了できるようになる。" - -msgid "" -"One of the default roles in the Compute RBAC system. 
Enables the user to " -"allocate publicly accessible IP addresses to instances and change firewall " -"rules." -msgstr "" -"Compute RBAC システムにおけるデフォルトのロールの 1 つ。ユーザーが、パブリッ" -"クにアクセス可能な IP アドレスをインスタンスに割り当てられ、ファイアウォール" -"ルールを変更できるようになる。" - -msgid "" -"One of the default roles in the Compute RBAC system. Grants complete system " -"access." -msgstr "" -"Compute RBAC システムにおけるデフォルトのロールの 1 つ。システムの完全なアク" -"セス権を付与する。" - -msgid "One of the hypervisors supported by OpenStack." -msgstr "OpenStack によりサポートされるハイパーバイザーの一つ。" - -msgid "One of the supported response formats in OpenStack." -msgstr "OpenStack でサポートされる応答形式の 1 つ。" - -msgid "Open Cloud Computing Interface (OCCI)" -msgstr "Open Cloud Computing Interface (OCCI)" - -msgid "Open Virtualization Format (OVF)" -msgstr "Open Virtualization Format (OVF)" - -msgid "" -"Open source GUI and CLI tools used for remote console access to VMs. " -"Supported by Compute." -msgstr "" -"仮想マシンへのリモートコンソールアクセスに使用される、オープンソースの GUI / " -"CUI ツール。" - -msgid "" -"Open source tool used to access remote hosts through an encrypted " -"communications channel, SSH key injection is supported by Compute." -msgstr "" -"暗号化した通信チャネル経由でリモートホストにアクセスするために使用されるオー" -"プンソースのツール。SSH 鍵インジェクションが Compute によりサポートされる。" - -msgid "Open the following HTML template in an editor of your choice:" -msgstr "以下の HTML テンプレートをお好きなエディターで開きます。" - -msgid "Open vSwitch" -msgstr "Open vSwitch" - -msgid "Open vSwitch (OVS) agent" -msgstr "Open vSwitch (OVS) エージェント" - -msgid "" -"Open vSwitch is a production quality, multilayer virtual switch licensed " -"under the open source Apache 2.0 license. It is designed to enable massive " -"network automation through programmatic extension, while still supporting " -"standard management interfaces and protocols (for example NetFlow, sFlow, " -"SPAN, RSPAN, CLI, LACP, 802.1ag)." -msgstr "" -"Open vSwitch は、商用品質、複数階層の仮想スイッチ。オープンソースの Apache " -"2.0 license に基づき許諾される。標準的な管理インターフェースやプロトコルと使" -"用ながら、プログラム拡張により大規模なネットワーク自動化を実現できるよう設計" -"されている (例えば、NetFlow、sFlow、SPAN、RSPAN、CLI、LACP、802.1ag)。" - -msgid "Open vSwitch neutron plug-in" -msgstr "Open vSwitch neutron プラグイン" - -msgid "OpenLDAP" -msgstr "OpenLDAP" - -msgid "OpenStack" -msgstr "OpenStack" - -msgid "" -"OpenStack APIs are open-source Python clients, and can run on Linux or Mac " -"OS X systems. On some client commands, you can specify a debug parameter to " -"show the underlying API request for the command. This is a good way to " -"become familiar with the OpenStack API calls." -msgstr "" -"OpenStack API は、オープンソースの Python クライアントです。Linux や Mac OS " -"X システムにおいて使用できます。いくつかのクライアントコマンドでは、デバッグ" -"パラメーターを指定して、そのコマンドの基盤となる API リクエストを表示できま" -"す。OpenStack API コールに慣れるには、この方法が便利です。" - -msgid "OpenStack Compute consists of the following areas and their components:" -msgstr "OpenStack Compute は、以下のコンポーネントから構成されます。" - -msgid "" -"OpenStack Compute interacts with OpenStack Identity for authentication; " -"OpenStack Image service for disk and server images; and OpenStack dashboard " -"for the user and administrative interface. Image access is limited by " -"projects, and by users; quotas are limited per project (the number of " -"instances, for example). OpenStack Compute can scale horizontally on " -"standard hardware, and download images to launch instances." 
-msgstr "" -"OpenStack Compute は、認証については OpenStack Identity サービスと、ディスク" -"やサーバーイメージについては OpenStack Image サービスと、ユーザーや管理者向け" -"インターフェースについては OpenStack Dashboard と連携して動作します。イメージ" -"アクセスはプロジェクトやユーザー単位で限定され、クォータ (例えば、インスタン" -"ス数) はプロジェクト単位に適用されます。 OpenStack Compute は標準的なハード" -"ウェアを使って水平にスケールさせることができます。イメージをダウンロードし" -"て、インスタンスを起動します。" - -msgid "" -"OpenStack Networking (neutron) allows you to create and attach interface " -"devices managed by other OpenStack services to networks. Plug-ins can be " -"implemented to accommodate different networking equipment and software, " -"providing flexibility to OpenStack architecture and deployment." -msgstr "" -"OpenStack Networking (neutron) を使うと、他の OpenStack サービスにより管理さ" -"れているインターフェースデバイスを作成して、ネットワークに接続できます。様々" -"なネットワーク装置やネットワークソフトウェアに対応するプラグインを実装するこ" -"とができ、 OpenStack のアーキテクチャーと環境に柔軟性をもたらします。" - -msgid "" -"OpenStack Networking agent that provides DHCP services for virtual networks." -msgstr "" -"仮想ネットワーク向けに DHCP サービスを提供する OpenStack Networking エージェ" -"ント。" - -msgid "" -"OpenStack Networking agent that provides layer-2 connectivity for virtual " -"networks." -msgstr "" -"仮想ネットワーク向けに L2 接続性を提供する OpenStack Networking エージェン" -"ト。" - -msgid "" -"OpenStack Networking agent that provides layer-3 (routing) services for " -"virtual networks." -msgstr "" -"仮想ネットワーク向けに L3 (ルーティング) サービスを提供する OpenStack " -"Networking エージェント。" - -msgid "" -"OpenStack Networking agent that provides metadata services for instances." -msgstr "" -"インスタンスにメタデータサービスを提供する OpenStack Networking エージェン" -"ト。" - -msgid "" -"OpenStack Networking mainly interacts with OpenStack Compute to provide " -"networks and connectivity for its instances." -msgstr "" -"OpenStack Networking は、おもに OpenStack Compute を連携して、コンピュートイ" -"ンスタンスにネットワークと接続性を提供します。" - -msgid "OpenStack Networking plug-ins and agents" -msgstr "OpenStack Networking プラグインおよびエージェント" - -msgid "OpenStack Services" -msgstr "OpenStack のサービス" - -msgid "OpenStack code name" -msgstr "OpenStack コード名" - -msgid "OpenStack distribution packages" -msgstr "OpenStack ディストリビューション" - -msgid "" -"OpenStack is a cloud operating system that controls large pools of compute, " -"storage, and networking resources throughout a data center, all managed " -"through a dashboard that gives administrators control while empowering their " -"users to provision resources through a web interface. OpenStack is an open " -"source project licensed under the Apache License 2.0." -msgstr "" -"OpenStack は、データセンター全体のコンピュートリソース、ストレージリソース、" -"ネットワークリソースの大規模なプールを制御する、クラウドオペレーティングシス" -"テム。管理者はすべてダッシュボードから制御できる。ユーザーは Web インター" -"フェースからリソースを配備できる。Apache License 2.0 に基づき許諾されるオープ" -"ンソースのプロジェクト。" - -msgid "OpenStack mailing lists" -msgstr "OpenStack メーリングリスト" - -msgid "" -"OpenStack project that aims to make cloud services easier to consume and " -"integrate with application development process by automating the source-to-" -"image process, and simplifying app-centric deployment. The project name is " -"solum." -msgstr "" -"クラウドサービスをより簡単に利用し、アプリケーション開発プロセスと統合するこ" -"とを目的とする OpenStack プロジェクト。ソースからイメージまでの手順を自動化" -"し、アプリケーション中心の開発を単純化します。プロジェクト名は solum。" - -msgid "" -"OpenStack project that aims to produce an OpenStack messaging service that " -"affords a variety of distributed application patterns in an efficient, " -"scalable and highly-available manner, and to create and maintain associated " -"Python libraries and documentation. The code name for the project is zaqar." 
-msgstr "" -"効率的、拡張可能、高可用な方法で、さまざまな分散アプリケーションのパターンを" -"提供する、OpenStack messaging service を開発することを目指している OpenStack " -"プロジェクト。また、関連する Python ライブラリーやドキュメントを作成してメン" -"テナンスする。このプロジェクトのコード名は zaqar。" - -msgid "" -"OpenStack project that produces a secret storage and generation system " -"capable of providing key management for services wishing to enable " -"encryption features. The code name of the project is barbican." -msgstr "" -"暗号化機能を有効化したいサービスに鍵管理機能を提供する機能を持つ、シークレッ" -"トストレージと生成システムを開発する OpenStack プロジェクト。このプロジェクト" -"の名前は barbican。" - -msgid "" -"OpenStack project that produces a set of Python libraries containing code " -"shared by OpenStack projects." -msgstr "" -"OpenStack プロジェクトに共有されるコードを含む Python ライブラリー群を作成す" -"る OpenStack プロジェクト。" - -msgid "OpenStack project that provides a Clustering service." -msgstr "クラスタリングサービスを提供する OpenStack プロジェクト。" - -msgid "OpenStack project that provides a Monitoring service." -msgstr "モニタリングサービスを提供する OpenStack プロジェクト。" - -msgid "" -"OpenStack project that provides a Software Development Lifecycle Automation " -"service." -msgstr "" -"ソフトウェア開発ライフサイクル自動化サービスを提供する OpenStack プロジェク" -"ト。" - -msgid "OpenStack project that provides a dashboard, which is a web interface." -msgstr "" -"ダッシュボードを提供する OpenStack プロジェクト。Web インターフェース。" - -msgid "" -"OpenStack project that provides a framework for performance analysis and " -"benchmarking of individual OpenStack components as well as full production " -"OpenStack cloud deployments. The code name of the project is rally." -msgstr "" -"各 OpenStack コンポーネント、本番の OpenStack 環境のパフォーマンス分析とベン" -"チマーク向けにフレームワークを提供する OpenStack プロジェクト。このプロジェク" -"トの名前は rally。" - -msgid "OpenStack project that provides a message service to applications." -msgstr "" -"メッセージサービスをアプリケーションに提供する OpenStack のプロジェクト。" - -msgid "" -"OpenStack project that provides a scalable data-processing stack and " -"associated management interfaces." -msgstr "" -"スケールアウト可能なデータ処理基盤と関連する管理インターフェースを提供する、" -"OpenStack のプロジェクト。" - -msgid "" -"OpenStack project that provides a scalable data-processing stack and " -"associated management interfaces. The code name for the project is sahara." -msgstr "" -"スケールアウト可能なデータ処理基盤と関連する管理インターフェースを提供する、" -"OpenStack のプロジェクト。プロジェクトのコード名は sahara です。" - -msgid "" -"OpenStack project that provides a set of services for management of " -"application containers in a multi-tenant cloud environment. The code name of " -"the project name is magnum." -msgstr "" -"マルチテナントクラウド環境において、アプリケーションコンテナーの管理サービス" -"を提供する、OpenStack のプロジェクト。プロジェクトのコード名は magnum です。" - -msgid "" -"OpenStack project that provides a simple YAML-based language to write " -"workflows, tasks and transition rules, and a service that allows to upload " -"them, modify, run them at scale and in a highly available manner, manage and " -"monitor workflow execution state and state of individual tasks. The code " -"name of the project is mistral." -msgstr "" -"ワークフロー、タスク、状態遷移ルールを書くための YAML ベースの言語を提供し、" -"それらをアップロード、編集できるサービス、それらを大規模かつ高可用に実行でき" -"るサービス、ワークフローの実行状態および個々のタスクの状態を管理および監視で" -"きるサービスを提供する OpenStack プロジェクト。このプロジェクトのコード名は " -"mistral。" - -msgid "OpenStack project that provides an Application catalog." -msgstr "アプリケーションカタログを提供する OpenStack のプロジェクト。" - -msgid "" -"OpenStack project that provides an application catalog service so that users " -"can compose and deploy composite environments on an application abstraction " -"level while managing the application lifecycle. The code name of the project " -"is murano." 
-msgstr "" -"ユーザーがアプリケーションのライフサイクルを管理しながら、アプリケーションの" -"抽象的なレベルで合成環境を作成して配備できるよう、アプリケーションカタログ" -"サービスを提供する OpenStack プロジェクト。このプロジェクトのコード名は " -"murano。" - -msgid "" -"OpenStack project that provides backup restore and disaster recovery as a " -"service." -msgstr "" -"バックアップリストアとディザスターリカバリーをサービスとして提供する " -"OpenStack プロジェクト。" - -msgid "OpenStack project that provides compute services." -msgstr "コンピュートサービスを提供する OpenStack プロジェクト。" - -msgid "OpenStack project that provides database services to applications." -msgstr "" -"データベースサービスをアプリケーションに提供する OpenStack のプロジェクト。" - -msgid "" -"OpenStack project that provides scalable, on demand, self service access to " -"authoritative DNS services, in a technology-agnostic manner. The code name " -"for the project is designate." -msgstr "" -"技術によらない方法で、権威 DNS サービスへの拡張可能、オンデマンド、セルフサー" -"ビスのアクセスを提供する OpenStack プロジェクト。このプロジェクトのコード名" -"は designate。" - -msgid "" -"OpenStack project that provides shared file systems as service to " -"applications." -msgstr "" -"共有ファイルシステムをアプリケーションに提供する OpenStack のプロジェクト。" - -msgid "OpenStack project that provides the Benchmark service." -msgstr "Benchmark service を提供する OpenStack プロジェクト。" - -msgid "OpenStack project that provides the Governance service." -msgstr "Governance service を提供する OpenStack プロジェクト。" - -msgid "OpenStack project that provides the Workflow service." -msgstr "ワークフローサービスを提供する OpenStack プロジェクト。" - -msgid "" -"OpenStack project that provisions bare metal, as opposed to virtual, " -"machines." -msgstr "" -"マシンを仮想とみなして、ベアメタルに展開する OpenStack のプロジェクト。" - -msgid "" -"OpenStack project that provisions bare metal, as opposed to virtual, " -"machines. The code name for the project is ironic." -msgstr "" -"マシンを仮想とみなして、ベアメタルに展開する OpenStack のプロジェクト。このプ" -"ロジェクトのコード名は ironic です。" - -msgid "" -"OpenStack project to provide Governance-as-a-Service across any collection " -"of cloud services in order to monitor, enforce, and audit policy over " -"dynamic infrastructure. The code name for the project is congress." -msgstr "" -"動的なインフラストラクチャー全体でポリシーを監視、強制、監査するために、さま" -"ざまなクラウドサービス群にわたり、Governance as a Service を提供する " -"OpenStack プロジェクト。このプロジェクトのコード名は congress。" - -msgid "" -"OpenStack provides an Infrastructure-as-a-Service (:term:`IaaS`) solution " -"through a set of interrelated services. Each service offers an application " -"programming interface (:term:`API `) that facilitates this integration. Depending on your needs, you can " -"install some or all services." -msgstr "" -"OpenStack は、互いに連携する一連のサービス群により Infrastructure-as-a-" -"Service (:term:`IaaS`) ソリューションを提供します。各サービスはこの統合を促" -"す :term:`API ` を提供します。必要に" -"応じて、いくつかのサービスをインストールすることもできますし、すべてのサービ" -"スをインストールすることもできます。" - -msgid "OpenStack services" -msgstr "OpenStack のサービス" - -msgid "OpenStack supports accessing the Amazon EC2 API through Compute." -msgstr "" -"OpenStack は、Compute 経由で Amazon EC2 API へのアクセスをサポートする。" - -msgid "" -"OpenStack supports encryption technologies such as HTTPS, SSH, SSL, TLS, " -"digital certificates, and data encryption." -msgstr "" -"OpenStack は、HTTPS、SSH、SSL、TLS、電子証明書、データ暗号化などの暗号化技術" -"をサポートします。" - -msgid "" -"OpenStack-on-OpenStack program. The code name for the OpenStack Deployment " -"program." -msgstr "" -"OpenStack-on-OpenStack プログラム。OpenStack Deployment プログラムのコード" -"名。" - -msgid "" -"Opens all objects for an object server and verifies the MD5 hash, size, and " -"metadata for each object." 
-msgstr "" -"あるオブジェクトサーバー用の全オブジェクトを開き、各オブジェクトの MD5 ハッ" -"シュ、サイズ、メタデータを検証する。" - -msgid "" -"Optionally, you can specify a name for the transfer by using the ``--display-" -"name displayName`` parameter." -msgstr "" -"オプションとして、``--display-name displayName`` パラメーターを使用して転送の" -"名前を指定することができます。" - -msgid "" -"Orchestrates multiple composite cloud applications by using either the " -"native HOT template format or the AWS CloudFormation template format, " -"through both an OpenStack-native REST API and a CloudFormation-compatible " -"Query API." -msgstr "" -"OpenStack ネイティブの REST API および CloudFormation 互換のクエリー API 経由" -"で、ネイティブの HOT テンプレート形式または AWS CloudFormation テンプレート形" -"式を使用することで、複数の混合クラウドアプリケーションを統合します。" - -msgid "" -"Orchestrates the launching of templates and provides events back to the API " -"consumer." -msgstr "テンプレートの起動全体を指揮し、API 利用者に返すイベントを生成します。" - -msgid "Orchestration service" -msgstr "Orchestration サービス" - -msgid "" -"Organizes and stores objects in Object Storage. Similar to the concept of a " -"Linux directory but cannot be nested. Alternative term for an Image service " -"container format." -msgstr "" -"Object Storage でオブジェクトを整理して保存する。Linux のディレクトリと似てい" -"るが、入れ子にできない。Image service のコンテナー形式の別名。" - -msgid "Oslo" -msgstr "Oslo" - -msgid "Override environment variable values" -msgstr "環境変数の値の上書き" - -msgid "Overview" -msgstr "概要" - -msgid "PCI passthrough" -msgstr "PCI パススルー" - -msgid "Package" -msgstr "パッケージ" - -msgid "" -"Pages that use HTML, JavaScript, and Cascading Style Sheets to enable users " -"to interact with a web page or show simple animation." -msgstr "" -"ユーザーが Web ページと通信したり、簡単なアニメーションを表示したりするため" -"に、HTML、JavaScript、CSS を使用するページ。" - -msgid "" -"Passed to API requests and used by OpenStack to verify that the client is " -"authorized to run the requested operation." -msgstr "" -"クライアントが要求した操作を実行する権限を持つことを検証するために、API リク" -"エストに渡され、OpenStack により使用される。" - -msgid "" -"Passes requests from clients to the appropriate workers and returns the " -"output to the client after the job completes." -msgstr "" -"クライアントからのリクエストを適切なワーカーに渡す。ジョブ完了後、出力をクラ" -"イアントに返す。" - -msgid "" -"Performs housekeeping tasks on the large data store. The replication " -"services ensure consistency and availability through the cluster. Other " -"periodic processes include auditors, updaters, and reapers." -msgstr "" -"大規模なデータストアでハウスキーピング作業を実行します。複製サービスにより、" -"クラスター全体で一貫性と可用性が確保されます。他の定期的なプロセスに " -"auditor、 updater, reaper などがあります。" - -msgid "Persists until VM is terminated" -msgstr "仮想マシンが削除されるまで存続" - -msgid "Persists until deleted" -msgstr "削除されるまで存続" - -msgid "Physical host dedicated to running compute nodes." -msgstr "コンピュートノード実行専用の物理ホスト。" - -msgid "Plans deployments." -msgstr "デプロイメント計画の立案。" - -msgid "Platform-as-a-Service (PaaS)" -msgstr "Platform-as-a-Service (PaaS)" - -msgid "Pluggable system of Hadoop installation engines." -msgstr "プラグイン型の、Hadoop インストールエンジン。" - -msgid "" -"Plugs and unplugs ports, creates networks or subnets, and provides IP " -"addressing. These plug-ins and agents differ depending on the vendor and " -"technologies used in the particular cloud. OpenStack Networking ships with " -"plug-ins and agents for Cisco virtual and physical switches, NEC OpenFlow " -"products, Open vSwitch, Linux bridging, and the VMware NSX product." 
-msgstr "" -"ポートの接続と切断、ネットワークやサブネットの作成、 IP アドレスの提供を行い" -"ます。これらのプラグインとエージェントは、それぞれのクラウドにおいて使用され" -"るベンダーと技術により異なります。OpenStack Networking には、 Cisco の仮想ス" -"イッチと物理スイッチ、NEC OpenFlow 製品、Open vSwitch、Linux ブリッジ、" -"VMware NSX 製品用のプラグインとエージェントが同梱されています。" - -msgid "" -"Point in time since the last container and accounts database sync among " -"nodes within Object Storage." -msgstr "" -"最新のコンテナーとアカウントのデータベースが Object Storage 内のノード間で同" -"期された基準時間。" - -msgid "Possible use cases for data migration include:" -msgstr "データ移動で考えられるユースケースは、以下のとおりです。" - -msgid "" -"Predefined templates of Hadoop configurations with the ability to modify " -"parameters." -msgstr "" -"Hadoop 設定の事前定義済みテンプレート。パラメーターを変更する機能があります。" - -msgid "Prerequisite" -msgstr "前提" - -msgid "Prevents an image from being deleted." -msgstr "イメージが削除されることを防ぎます。" - -msgid "" -"Principal communications protocol in the internet protocol suite for " -"relaying datagrams across network boundaries." -msgstr "" -"ネットワーク境界を越えてデータグラムを中継するための、インターネットプロトコ" -"ルにおける中心的な通信プロトコル。" - -msgid "Prints the image size in a human-friendly format." -msgstr "人間が理解しやすい形式でイメージの容量を表示します。" - -msgid "" -"Processes client requests for VMs, updates Image service metadata on the " -"registry server, and communicates with the store adapter to upload VM images " -"from the back-end store." -msgstr "" -"仮想マシンに対するクライアントリクエスト、レジストリーサーバーにおける Image " -"service のメタデータの更新、バックエンドストアから仮想マシンイメージをアップ" -"ロードするためのストアアダプターを用いた通信を処理する。" - -msgid "" -"Processing is fairly complex. Basically, the daemon accepts actions from the " -"queue and performs a series of system commands such as launching a KVM " -"instance and updating its state in the database." -msgstr "" -"処理内容はかなり複雑です。このデーモンはキューからアクションを受け取り、 KVM " -"インスタンスの起動やデータベースの状態更新など一連のシステムコマンドを実行し" -"ます。" - -msgid "Programming language used extensively in OpenStack." -msgstr "OpenStack において幅広く使用されるプログラミング言語。" - -msgid "Project name" -msgstr "プロジェクト名" - -msgid "" -"Project name for OpenStack Network Information Service. To be merged with " -"Networking." -msgstr "" -"OpenStack Network Information Service のプロジェクト名。Networking と統合予" -"定。" - -msgid "Projects" -msgstr "プロジェクト" - -msgid "" -"Projects represent the base unit of “ownership” in OpenStack, in that all " -"resources in OpenStack should be owned by a specific project. In OpenStack " -"Identity, a project must be owned by a specific domain." -msgstr "" -"プロジェクトは OpenStack における「所有権」の基本的な単位で、OpenStack におけ" -"るあらゆるリソースは何らかのテナントに属する。 OpenStack Identity では、プロ" -"ジェクトは特定のドメインに何らかのドメインに属する。" - -msgid "" -"Protocol that encapsulates a wide variety of network layer protocols inside " -"virtual point-to-point links." -msgstr "" -"仮想のポイントツーポイントリンク内で、さまざまなネットワーク層のプロトコルを" -"カプセル化するプロトコル。" - -msgid "" -"Provide as much detail as possible in the description. Paste in your command " -"output or stack traces, links to screen shots, and any other information " -"which might be useful." -msgstr "" -"できるだけ詳細な情報を記入してください。コマンドの出力結果やスタックトレー" -"ス、スクリーンショットへのリンク、その他有用な情報などがいいでしょう。" - -msgid "" -"Provided by Compute in the form of cloudpipes, specialized instances that " -"are used to create VPNs on a per-project basis." -msgstr "" -"Compute では cloudpipe の形で提供される。 cloudpipe では、特別なインスタンス" -"を使って、プロジェクト毎に VPN が作成される。" - -msgid "Provided in Compute through the system usage data facility." 
-msgstr "システム使用状況データ機能経由で Compute において提供される。" - -msgid "" -"Provides Shared File System service via nfs, cifs, glusterfs, or hdfs " -"protocol" -msgstr "" -"NFS, CIFS, GlusterFS, HDFS プロトコル経由で共有ファイルシステムが提供される" - -msgid "" -"Provides a method of allocating space on mass-storage devices that is more " -"flexible than conventional partitioning schemes." -msgstr "" -"伝統的なパーティションスキーマよりも柔軟に、大規模ストレージデバイスに領域を" -"割り当てる方式を提供する。" - -msgid "" -"Provides a predefined list of actions that the user can perform, such as " -"start or stop VMs, reset passwords, and so on. Supported in both Identity " -"and Compute and can be configured using the horizon dashboard." -msgstr "" -"仮想マシンの起動や停止、パスワードの初期化など、ユーザーが実行できる操作の事" -"前定義済み一覧を提供する。Identity と Compute においてサポートされる。ダッ" -"シュボードを使用して設定できる。" - -msgid "" -"Provides a proxy for accessing running instances through a SPICE connection. " -"Supports browser-based HTML5 client." -msgstr "" -"SPICE コネクション経由で実行中のインスタンスのアクセスへプロキシーを提供しま" -"す。ブラウザーベースの HTML5 クライアントをサポートします。" - -msgid "" -"Provides a proxy for accessing running instances through a VNC connection. " -"Supports an OpenStack-specific Java client." -msgstr "" -"VNC コネクション経由で実行中のインスタンスのアクセスへプロキシーを提供しま" -"す。 OpenStack 固有の Java クライアントをサポートします。" - -msgid "" -"Provides a proxy for accessing running instances through a VNC connection. " -"Supports browser-based novnc clients." -msgstr "" -"VNC コネクション経由で実行中のインスタンスへアクセスするプロキシーを提供しま" -"す。ブラウザーベースの novnc クライアントをサポートします。" - -msgid "" -"Provides a web-based self-service portal to interact with underlying " -"OpenStack services, such as launching an instance, assigning IP addresses " -"and configuring access controls." -msgstr "" -"インスタンスの起動、IP アドレスの割り当て、アクセス制御の設定など、 " -"OpenStack サービスを操作するために、ウェブベースのセルフサービスポータルを提" -"供します。" - -msgid "" -"Provides an OpenStack-native RESTful API that supports JSON to provision and " -"manage Trove instances." -msgstr "" -"Trove インスタンスの作成と管理を行うための、JSON に対応した OpenStack 固有の " -"RESTful API を提供します。" - -msgid "" -"Provides an authentication and authorization service for other OpenStack " -"services. Provides a catalog of endpoints for all OpenStack services." -msgstr "" -"他の OpenStack サービスに対して認証および認可サービスを提供します。すべての " -"OpenStack サービスに対してエンドポイントのカタログを提供します。" - -msgid "" -"Provides an interface to the underlying Open vSwitch service for the " -"Networking plug-in." -msgstr "" -"Networking のプラグインに対して、バックエンドの Open vSwitch サービスへのイン" -"ターフェースを提供する。" - -msgid "" -"Provides capabilities to provision and scale Hadoop clusters in OpenStack by " -"specifying parameters like Hadoop version, cluster topology and nodes " -"hardware details." -msgstr "" -"OpenStack においてスケール可能な Hadoop クラスターを展開する機能を提供しま" -"す。Hadoop のバージョン、クラスタートポロジー、ノードのハードウェアの詳細など" -"のパラメーターを指定します。" - -msgid "" -"Provides data redundancy and fault tolerance by creating copies of Object " -"Storage objects, accounts, and containers so that they are not lost when the " -"underlying storage fails." -msgstr "" -"Object Storage のオブジェクト、アカウント、コンテナーのコピーを作成すること" -"で、データ冗長性や耐障害性を実現する。これにより、バックエンドのストレージが" -"故障した場合でもデータは失わない。" - -msgid "" -"Provides logical partitioning of Compute resources in a child and parent " -"relationship. Requests are passed from parent cells to child cells if the " -"parent cannot provide the requested resource." -msgstr "" -"親子関係で Compute リソースの論理パーティションを提供する。親セルが要求された" -"リソースを提供できない場合、親セルからのリクエストは子セルに渡される。" - -msgid "" -"Provides persistent block storage to running instances. 
Its pluggable driver " -"architecture facilitates the creation and management of block storage " -"devices." -msgstr "" -"実行中のインスタンスに永続的なブロックストレージを提供します。そのプラグイン" -"可能なドライバーアーキテクチャーにより、ブロックストレージデバイスの作成と管" -"理が簡単に行えます。" - -msgid "" -"Provides scalable and reliable Cloud Database-as-a-Service functionality for " -"both relational and non-relational database engines." -msgstr "" -"リレーショナルデータベースと非リレーショナルデータベースの両エンジン用にス" -"ケール可能かつ信頼できるクラウド Database-as-a-Service を提供します。" - -msgid "Provides support for NexentaStor devices in Compute." -msgstr "Compute において NexentaStor デバイスのサポートを提供する。" - -msgid "Provides support for Open vSwitch in Networking." -msgstr "Networking で Open vSwitch のサポートを提供する。" - -msgid "Provides support for VMware NSX in Neutron." -msgstr "Neutron における VMware NSX サポートを提供する。" - -msgid "" -"Provides support for new and specialized types of back-end storage for the " -"Block Storage volume manager." -msgstr "" -"Block Storage のボリュームマネージャーに対して、新しい特別な種類のバックエン" -"ドストレージのサポートを提供する。" - -msgid "" -"Provides to the consumer the ability to deploy applications through a " -"programming language or tools supported by the cloud platform provider. An " -"example of Platform-as-a-Service is an Eclipse/Java programming platform " -"provided with no downloads required." -msgstr "" -"クラウドプラットフォームプロバイダーによりサポートされるプログラミング言語や" -"ツールを用いてアプリケーションを配備する機能を利用者に提供する。PaaS の例は、" -"ダウンロードする必要がない、Eclipse/Java プログラミングプラットフォームです。" - -msgid "Proxy servers (swift-proxy-server)" -msgstr "プロキシサーバー (swift-proxy-server)" - -msgid "" -"Publishes collected data to various targets including data stores and " -"message queues." -msgstr "" -"収集したデータを、データストアやメッセージキューなどの様々な宛先に発行しま" -"す。" - -msgid "Puppet" -msgstr "Puppet" - -msgid "Python" -msgstr "Python" - -msgid "Python 2.7 or later" -msgstr "Python 2.7 以降" - -msgid "QEMU Copy On Write 2 (QCOW2)" -msgstr "QEMU Copy On Write 2 (QCOW2)" - -msgid "" -"QEMU is a generic and open source machine emulator and virtualizer. One of " -"the hypervisors supported by OpenStack, generally used for development " -"purposes." -msgstr "" -"QEMU は、汎用のオープンソースのマシンエミュレーターと仮想化ソフトウェアです。" -"OpenStack がサポートするハイパーバイザーの一つ。一般に、開発目的で使用され" -"る。" - -msgid "Qpid" -msgstr "Qpid" - -msgid "Quick EMUlator (QEMU)" -msgstr "Quick EMUlator (QEMU)" - -msgid "RADOS Block Device (RBD)" -msgstr "RADOS Block Device (RBD)" - -msgid "RAM filter" -msgstr "RAM フィルター" - -msgid "RAM overcommit" -msgstr "RAM オーバーコミット" - -msgid "" -"RDO, openSUSE, SUSE Linux Enterprise, Debian, and Ubuntu have client " -"packages that can be installed without ``pip``." -msgstr "" -"RDO、openSUSE、SUSE Linux Enterprise、Debian、Ubuntu は、``pip`` なしで利用で" -"きるクライアントパッケージがあります。" - -msgid "REST API" -msgstr "REST API" - -msgid "RESTful" -msgstr "RESTful" - -msgid "RPC driver" -msgstr "RPC ドライバー" - -msgid "RXTX cap" -msgstr "RXTX キャップ" - -msgid "RXTX quota" -msgstr "RXTX クォータ" - -msgid "RabbitMQ" -msgstr "RabbitMQ" - -msgid "Rackspace Cloud Files" -msgstr "Rackspace Cloud Files" - -msgid "Rating service" -msgstr "Rating サービス" - -msgid "Rating service." -msgstr "Rating サービス。" - -msgid "Recon" -msgstr "recon" - -msgid "Red Hat Enterprise Linux (RHEL)" -msgstr "Red Hat Enterprise Linux (RHEL)" - -msgid "" -"Reducing the size of files by special encoding, the file can be decompressed " -"again to its original content. OpenStack supports compression at the Linux " -"file system level but does not support compression for things such as Object " -"Storage objects or Image service VM images." 
-msgstr "" -"特別なエンコーディングによりファイル容量を減らすこと。このファイルは、元の内" -"容に展開できます。OpenStack は、Linux ファイルシステムレベルの圧縮をサポート" -"しますが、Object Storage のオブジェクトや Image service の仮想マシンイメージ" -"などの圧縮をサポートしません。" - -msgid "Released as open source by NASA in 2010 and is the basis for Compute." -msgstr "" -"2010 年に NASA によりオープンソースとしてリリースされた。Compute の基になっ" -"た。" - -msgid "" -"Released as open source by Rackspace in 2010; the basis for Object Storage." -msgstr "" -"Rackspace により 2010 年にオープンソースとして公開された。Object Storage の" -"ベース。" - -msgid "Reliable, Autonomic Distributed Object Store" -msgstr "Reliable, Autonomic Distributed Object Store" - -msgid "Remote Procedure Call (RPC)" -msgstr "Remote Procedure Call (RPC)" - -msgid "" -"Removes all data on the server and replaces it with the specified image. " -"Server ID and IP addresses remain the same." -msgstr "" -"サーバからすべてのデータを消去し、特定のイメージで置き換える。サーバのIDとIP" -"アドレスは変更されない。" - -msgid "Represents a virtual, isolated OSI layer-2 subnet in Networking." -msgstr "Networking における仮想の分離された OSI L-2 サブネットを表す。" - -msgid "Resize a volume" -msgstr "ボリュームのリサイズ" - -msgid "" -"Resize the volume by passing the volume ID and the new size (a value greater " -"than the old one) as parameters:" -msgstr "" -"パラメーターとしてボリューム ID と新しいサイズ (以前のボリュームよりも大きい" -"サイズ) を指定して、ボリュームをリサイズします。" - -msgid "Restart Apache for this change to take effect." -msgstr "Apache を再起動して、この変更を反映します。" - -msgid "Restart the Apache service." -msgstr "Apache サービスを再起動します。" - -msgid "Role Based Access Control (RBAC)" -msgstr "Role Based Access Control (RBAC)" - -msgid "Routes information between the Block Storage processes." -msgstr "Block Storage プロセス間で情報を転送します。" - -msgid "Routes information between the Shared File Systems processes." -msgstr "Shared File Systems プロセス間の情報を中継します。" - -msgid "Run the following command to discover the version number for a client:" -msgstr "" -"クライアントのバージョン番号を確認するために、以下のコマンドを実行します。" - -msgid "Runs automated tests against the core OpenStack API; written in Rails." -msgstr "" -"コア OpenStack API に対して自動テストを実行する。Rails で書かれている。" - -msgid "" -"Runs on a central management server and determines when to fire alarms. The " -"alarms are generated based on defined rules against events, which are " -"captured by the Telemetry Data Collection service's notification agents." -msgstr "" -"中央管理サーバーで実行され、通知を発動するタイミングを判断します。通知は、イ" -"ベントに対して定義されたルールに基づいて生成されます。これは、Telemetry Data " -"Collection サービスの通知エージェントにより収集されます。" - -msgid "" -"Runs on a central management server to poll for resource utilization " -"statistics for resources not tied to instances or compute nodes. Multiple " -"agents can be started to scale service horizontally." -msgstr "" -"中央管理サーバーで実行され、インスタンスやコンピュートノードに関連付いていな" -"いリソースの使用統計をポーリングします。複数のエージェントを実行し、水平に" -"サービスをスケールさせることができます。" - -msgid "" -"Runs on a central management server(s) and consumes messages from the " -"message queue(s) to build event and metering data." -msgstr "" -"中央管理サーバーで実行され、メッセージキューからメッセージを読みだして、イベ" -"ントや計測データを作成します。" - -msgid "" -"Runs on central management server(s) and dispatches collected telemetry data " -"to a data store or external consumer without modification." -msgstr "" -"中央管理サーバーで実行され、収集した計測データを、データストアや通知を使わな" -"い外部の使用者に送り出します。" - -msgid "" -"Runs on each compute node and polls for resource utilization statistics. " -"There may be other types of agents in the future, but for now our focus is " -"creating the compute agent." 
-msgstr "" -"各コンピュートノードで実行され、リソース使用統計をポーリングします。将来的に" -"は他の種類のエージェントも作成されるかもしれませんが、現時点ではコンピュート" -"エージェントに注力しています。" - -msgid "" -"Runs on one or more central management servers to allow alarms to be set " -"based on the threshold evaluation for a collection of samples." -msgstr "" -"1つ以上の中央管理サーバーで実行され、収集したサンプルデータに対する閾値の評価" -"を行い、アラームをセットします。" - -msgid "" -"Runs on one or more central management servers to determine when alarms fire " -"due to the associated statistic trend crossing a threshold over a sliding " -"time window." -msgstr "" -"1つ以上の中央管理サーバーで実行され、関連する統計の傾向がスライディングタイム" -"ウィンドウで閾値に違反した場合にアラームを発行するかを判定します。" - -msgid "" -"Runs on one or more central management servers to provide access to the " -"alarm information stored in the data store." -msgstr "" -"1 つ以上の中央管理サーバーで実行され、データストアへのデータアクセス手段を提" -"供します。" - -msgid "" -"Runs on one or more central management servers to provide data access from " -"the data store." -msgstr "" -"1つ以上の中央管理サーバーで実行され、データストアへのデータアクセス手段を提供" -"します。" - -msgid "" -"Runs on the host, and receives messages from guest instances that want to " -"update information on the host." -msgstr "" -"ホストで実行され、ホストにおいてアップデートしたい情報のメッセージをゲストイ" -"ンスタンスから受信します。" - -msgid "Runs operating systems and provides scratch space" -msgstr "オペレーティングシステムを実行し、新規領域を提供する" - -msgid "" -"Runs within the guest instance. Manages and performs operations on the " -"database itself." -msgstr "" -"ゲストインスタンスの中で実行します。データベース自身の処理を管理、実行しま" -"す。" - -msgid "S3" -msgstr "S3" - -msgid "SAML assertion" -msgstr "SAML アサーション" - -msgid "SELinux" -msgstr "SELinux" - -msgid "" -"SINA standard that defines a RESTful API for managing objects in the cloud, " -"currently unsupported in OpenStack." -msgstr "" -"クラウドにあるオブジェクトを管理するための RESTful API を定義する SINA 標準。" -"現在 OpenStack ではサポートされていない。" - -msgid "SNAPSHOT" -msgstr "SNAPSHOT" - -msgid "SPICE" -msgstr "SPICE" - -msgid "SQL database" -msgstr "SQL データベース" - -msgid "SQL-Alchemy" -msgstr "SQL-Alchemy" - -msgid "SQLite" -msgstr "SQLite" - -msgid "SUSE Linux Enterprise Server (SLES)" -msgstr "SUSE Linux Enterprise Server (SLES)" - -msgid "" -"Script that initializes the building of the ring file, takes daemon names as " -"parameter and offers commands. Documented in http://docs.openstack.org/" -"developer/swift/admin_guide.html#managing-services." -msgstr "" -"リングファイルを初期化し、パラメーターとしてデーモンの名前を受け付け、コマン" -"ドを提供するスクリプトです。ドキュメントは http://docs.openstack.org/" -"developer/swift/admin_guide.html#managing-services にあります。" - -msgid "See API endpoint." -msgstr "API エンドポイントを参照。" - -msgid "See access control list." -msgstr "「アクセス制御リスト」参照。" - -msgid "" -"Selects the optimal storage provider node on which to create the volume. A " -"similar component to the ``nova-scheduler``." -msgstr "" -"ボリュームを作成するのに適切なストレージプロバイダーノードを選択します。 " -"``nova-scheduler`` と同様のコンポーネントです。" - -msgid "" -"Send the volume transfer ID and authorization key to the new owner (for " -"example, by email)." -msgstr "" -"ボリューム転送 ID と認証キーを新しい所有者に送信します (例: 電子メール)。" - -msgid "Server" -msgstr "サーバー" - -msgid "Service" -msgstr "サービス" - -msgid "Service Level Agreement (SLA)" -msgstr "サービス水準合意 (SLA; Service Level Agreement)" - -msgid "Set environment variables using the OpenStack RC file" -msgstr "OpenStack RC ファイルを用いた環境変数の設定" - -msgid "" -"Set of bits that make up a single character; there are usually 8 bits to a " -"byte." 
-msgstr "1 つの文字を構成するビットの組。通常は 8 ビットで 1 バイトになる。" - -msgid "" -"Set the HTML title, which appears at the top of the browser window, by " -"adding the following line to ``local_settings.py``:" -msgstr "" -"``local_settings.py`` に以下の行を追加して、HTML タイトルを設定します。 HTML " -"タイトルは、ブラウザーウィンドウの上部に表示されます。" - -msgid "" -"Setting for the Compute RabbitMQ message delivery mode; can be set to either " -"transient or persistent." -msgstr "" -"Compute RabbitMQ メッセージ配信モード用設定。transient(一時)又は " -"persistent(永続)のいずれかを設定できる。" - -msgid "Shared File Systems API" -msgstr "共有ファイルシステム API" - -msgid "Shared File Systems service" -msgstr "Shared File Systems サービス" - -msgid "Sheepdog" -msgstr "Sheepdog" - -msgid "Show host usage statistics" -msgstr "ホストの使用統計の表示" - -msgid "Show information for your volume:" -msgstr "お使いのボリュームの情報を表示します。" - -msgid "Show instance usage statistics" -msgstr "インスタンスの使用統計の表示" - -msgid "Show usage statistics for hosts and instances" -msgstr "ホストおよびインスタンスの使用統計の表示" - -msgid "" -"Similar to :option:`--location` in usage, but indicates that the image " -"server should immediately copy the data and store it in its configured image " -"store." -msgstr "" -"使用方法は :option:`--location` に似ていますが、イメージサーバーはすぐにデー" -"タをコピーして設定済みのイメージストアに保存する必要があると指定します。" - -msgid "" -"Similar to the ``nova-compute`` service, accepts networking tasks from the " -"queue and manipulates the network. Performs tasks such as setting up " -"bridging interfaces or changing IPtables rules." -msgstr "" -"``nova-compute`` と似ています。 キューからネットワーク関係のタスクを受け取" -"り、ネットワーク操作を行ないます。例えば、ブリッジインターフェースの準備や " -"iptables ルールの変更などのタスクを行います。" - -msgid "Simple Cloud Identity Management (SCIM)" -msgstr "Simple Cloud Identity Management (SCIM)" - -msgid "" -"Since the installation process compiles source files, this requires the " -"related Python development package for your operating system and " -"distribution." -msgstr "" -"インストール中にソースファイルをコンパイルするため、お使いのオペレーティング" -"システム環境に応じた、関連する Python 開発パッケージが必要になります。" - -msgid "Single-root I/O Virtualization (SR-IOV)" -msgstr "Single-root I/O Virtualization (SR-IOV)" - -msgid "Site colors" -msgstr "サイトのカラー" - -msgid "Sizing based on need" -msgstr "必要なサイズを指定" - -msgid "Sizings based on need" -msgstr "必要なサイズを指定" - -msgid "SmokeStack" -msgstr "SmokeStack" - -msgid "" -"Soft limit on the amount of network traffic a Compute VM instance can send " -"and receive." -msgstr "" -"Compute の仮想マシンインスタンスが送受信できるネットワーク通信量のソフト制" -"限。" - -msgid "Software Development Lifecycle Automation service" -msgstr "ソフトウェア開発ライフサイクル自動化サービス" - -msgid "" -"Software component providing the actual implementation for Networking APIs, " -"or for Compute APIs, depending on the context." -msgstr "" -"利用形態に応じた、Networking API や Compute API の具体的な実装を提供するソフ" -"トウェアコンポーネント。" - -msgid "" -"Software that arbitrates and controls VM access to the actual underlying " -"hardware." -msgstr "VM のアクセスを実際の下位ハードウェアに仲介して制御するソフトウェア。" - -msgid "" -"Software that enables multiple VMs to share a single physical NIC within " -"Compute." -msgstr "" -"複数の仮想マシンが Compute 内で単一の物理 NIC を共有するためのソフトウェア。" - -msgid "" -"Software that runs on a host or node and provides the features and functions " -"of a hardware-based network switch." -msgstr "" -"ホストやノードで実行され、ハードウェアのネットワークスイッチの機能を提供する" -"ソフトウェア。" - -msgid "SolidFire Volume Driver" -msgstr "SolidFire Volume Driver" - -msgid "Some tips:" -msgstr "使いこなすヒント:" - -msgid "Something you must be aware of before proceeding." -msgstr "続行する前に注意が必要なものです。" - -msgid "" -"Special tenant that contains all services that are listed in the catalog." 
-msgstr "カタログに一覧化される全サービスを含む特別なテナント。" - -msgid "" -"Specification for managing identity in the cloud, currently unsupported by " -"OpenStack." -msgstr "" -"クラウドで認証情報を管理するための仕様。現在、OpenStack によりサポートされて" -"いない。" - -msgid "" -"Specifies additional requirements when Compute determines where to start a " -"new instance. Examples include a minimum amount of network bandwidth or a " -"GPU." -msgstr "" -"Compute が新しいインスタンスを起動する場所を判断するとき、追加の要件を指定す" -"る。例えば、ネットワーク帯域の最小量、GPU などがある。" - -msgid "" -"Specifies the authentication source used by Image service or Identity. In " -"the Database service, it refers to the extensions implemented for a data " -"store." -msgstr "" -"Image サービスや Identity サービスが使用する認証元を指定する。 Database サー" -"ビスでは、データストア用に実装された拡張を指す。" - -msgid "StackTach" -msgstr "StackTach" - -msgid "Standard for packaging VM images. Supported in OpenStack." -msgstr "仮想マシンイメージのパッケージ化の標準。OpenStack でサポートされる。" - -msgid "StaticWeb" -msgstr "StaticWeb" - -msgid "Storage concepts" -msgstr "ストレージの考え方" - -msgid "" -"Storage protocol similar in concept to TCP/IP; encapsulates SCSI commands " -"and data." -msgstr "" -"TCP/IP に似た概念のストレージプロトコル。SCSI コマンドとデータをカプセル化す" -"る。" - -msgid "" -"Storage protocol that encapsulates SCSI frames for transport over IP " -"networks." -msgstr "" -"IP ネットワーク上で転送するために、SCSI フレームをカプセル化するストレージプ" -"ロトコル。" - -msgid "Storage repository for image files" -msgstr "イメージファイル用のストレージリポジトリー" - -msgid "" -"Storage ring build and rebalance utility. Documented in http://docs." -"openstack.org/developer/swift/admin_guide.html#managing-the-rings." -msgstr "" -"ストレージリングの作成とリバランスを行うツール。ドキュメントは http://docs." -"openstack.org/developer/swift/admin_guide.html#managing-the-rings にありま" -"す。" - -msgid "Storage types" -msgstr "ストレージ種別" - -msgid "Stores CephFS metadata." -msgstr "CephFS メタデータを格納する。" - -msgid "" -"Stores and retrieves arbitrary unstructured data objects via a RESTful, HTTP " -"based API. It is highly fault tolerant with its data replication and scale-" -"out architecture. Its implementation is not like a file server with " -"mountable directories. In this case, it writes objects and files to multiple " -"drives, ensuring the data is replicated across a server cluster." -msgstr "" -"RESTful な HTTP ベースの API 経由で任意の非構造データオブジェクトを保存および" -"取得します。データ複製による高い耐障害性と、スケールアウトアーキテクチャーを" -"持ちます。その実装は、マウント可能なディレクトリを持つファイルサーバーとは異" -"なります。オブジェクトやファイルを複数のドライブに書き込み、データがサーバク" -"ラスター間で複製されていることを保証します。" - -msgid "" -"Stores and retrieves virtual machine disk images. OpenStack Compute makes " -"use of this during instance provisioning." -msgstr "" -"仮想マシンディスクイメージを保存および取得します。OpenStack Compute がインス" -"タンスの配備中に使用します。" - -msgid "" -"Stores image metadata and you can choose your database depending on your " -"preference. Most deployments use MySQL or SQLite." -msgstr "" -"イメージのメタデータを保存します。お好みのデータベースを選択できます。多くの" -"環境では MySQL か SQLite を使用します。" - -msgid "" -"Stores most build-time and run-time states for a cloud infrastructure, " -"including:" -msgstr "" -"クラウドインフラの、作成中、実行中のほとんどの状態を保持します。例えば以下の" -"ような情報があります。" - -msgid "" -"Stores, processes, and retrieves metadata about images. Metadata includes " -"items such as size and type." -msgstr "" -"イメージに関するメタデータの保存、処理、取得を行います。メタデータは容量や形" -"式などの項目があります。" - -msgid "" -"String of text known only by the user; used along with an access key to make " -"requests to the Compute API." -msgstr "" -"ユーザーのみが知っているテキスト文字列。リクエストを Compute API に発行するた" -"めに、アクセスキーと一緒に使用される。" - -msgid "Subdivides physical CPUs. Instances can then use those divisions." 
-msgstr "" -"物理 CPU を分割する。インスタンスは、これらの分割したものを使用できる。" - -msgid "Support for different Hadoop distributions:" -msgstr "さまざまな Hadoop ディストリビューションをサポートしています。" - -msgid "Supported model values" -msgstr "サポートされるモデルの値" - -msgid "Supports interaction with VMware products in Compute." -msgstr "Compute で VMware 製品の操作をサポートする。" - -msgid "" -"Takes a virtual machine instance request from the queue and determines on " -"which compute server host it runs." -msgstr "" -"仮想マシンインスタンスの作成要求を受け取ってキューイングし、インスタンスをど" -"のコンピュートサーバーホストで実行するかを決定します。" - -msgid "Telemetry Alarming service" -msgstr "Telemetry Alarming サービス" - -msgid "Telemetry Data Collection service" -msgstr "Telemetry Data Collection サービス" - -msgid "Telemetry service" -msgstr "Telemetry サービス" - -msgid "Telemetry v3" -msgstr "Telemetry v3" - -msgid "TempAuth" -msgstr "TempAuth" - -msgid "TempURL" -msgstr "TempURL" - -msgid "Tempest" -msgstr "Tempest" - -msgid "Tenant API" -msgstr "テナント API" - -msgid "" -"Term for an Object Storage process that runs for a long time. Can indicate a " -"hung process." -msgstr "" -"長時間動作している Object Storage のプロセスを指す用語。ハングしたプロセスを" -"意味する可能性もある。" - -msgid "" -"Term used in the OSI network architecture for the data link layer. The data " -"link layer is responsible for media access control, flow control and " -"detecting and possibly correcting errors that may occur in the physical " -"layer." -msgstr "" -"OSI ネットワークアーキテクチャーにおけるデータリンク層に使用される用語。デー" -"タリンク層は、メディアアクセス制御、フロー制御、物理層で発生する可能性のある" -"エラー検知、できる限りエラー訂正に責任を持つ。" - -msgid "" -"Term used in the OSI network architecture for the network layer. The network " -"layer is responsible for packet forwarding including routing from one node " -"to another." -msgstr "" -"OSI ネットワークアーキテクチャーにおけるネットワーク層に使用される用語。ネッ" -"トワーク層は、パケット転送、あるノードから別のノードへのルーティングに責任を" -"持つ。" - -msgid "The :command:`delete` command does not return any output." -msgstr ":command:`delete` コマンドは何も出力しません。" - -msgid "The :command:`extend` command does not return any output." -msgstr ":command:`extend` コマンドは何も出力しません。" - -msgid "The :command:`volume-detach` command does not return any output." -msgstr ":command:`volume-detach` コマンドは何も出力しません。" - -msgid "" -"The API used to access the OpenStack Identity service provided through " -"keystone." -msgstr "" -"keystone が提供する OpenStack Identity サービスへのアクセスに使用される API。" - -msgid "The Amazon commercial block storage product." -msgstr "Amazon のブロックストレージの商用製品。" - -msgid "The Amazon commercial compute product, similar to Compute." -msgstr "Amazon の商用コンピュート製品。Compute と似ている。" - -msgid "" -"The Apache Software Foundation supports the Apache community of open-source " -"software projects. These projects provide software products for the public " -"good." -msgstr "" -"The Apache Software Foundation は、オープンソースソフトウェアプロジェクトの " -"Apache コミュニティーをサポートする。これらのプロジェクトは、公共財のためにソ" -"フトウェア製品を提供する。" - -msgid "The Block Storage driver for the SolidFire iSCSI storage appliance." -msgstr "" -"SolidFire iSCSI ストレージアプライアンス向けの Block Storage ドライバー。" - -msgid "The Block Storage service consists of the following components:" -msgstr "Block Storage サービスは、以下のコンポーネントから構成されます。" - -msgid "" -"The Border Gateway Protocol is a dynamic routing protocol that connects " -"autonomous systems. Considered the backbone of the Internet, this protocol " -"connects disparate networks to form a larger network." -msgstr "" -"Border Gateway Protocol は、自律システムを接続する、動的ルーティングプロトコ" -"ルである。インターネットのバックボーンと比べて、このプロトコルは、より大きな" -"ネットワークを形成するために、異なるネットワークを接続する。" - -msgid "The Ceph storage daemon." 
-msgstr "Ceph ストレージデーモン。" - -msgid "" -"The Compute RabbitMQ message exchange that remains active when the server " -"restarts." -msgstr "" -"サーバーの再起動時に有効なままになる Compute の RabbitMQ メッセージ交換。" - -msgid "" -"The Compute VM scheduling algorithm that attempts to start a new VM on the " -"host with the least amount of load." -msgstr "" -"新規仮想マシンを合計負荷の最も低いホストで起動しようとする、Compute 仮想マシ" -"ンスケジューリングアルゴリズム。" - -msgid "" -"The Compute component that chooses suitable hosts on which to start VM " -"instances." -msgstr "" -"仮想マシンインスタンスを起動するために適切なホストを選択する Compute のコン" -"ポーネント。" - -msgid "" -"The Compute component that contains a list of the current capabilities of " -"each host within the cell and routes requests as appropriate." -msgstr "" -"セル内にある各ホストの現在のキャパシティー一覧を持ち、リクエストを適切にルー" -"ティングする、Compute のコンポーネント。" - -msgid "" -"The Compute component that gives IP addresses to authorized nodes and " -"assumes DHCP, DNS, and routing configuration and services are provided by " -"something else." -msgstr "" -"認可されたノードに IP アドレスを割り当てる Compute のコンポーネント。DHCP、" -"DNS、ルーティングの設定とサービスが別の何かにより提供されることを仮定してい" -"る。" - -msgid "" -"The Compute component that manages various network components, such as " -"firewall rules, IP address allocation, and so on." -msgstr "" -"ファイアウォールのルール、IP アドレスの割り当てなど、さまざまなネットワークの" -"コンポーネントを管理する、Compute のコンポーネント。" - -msgid "" -"The Compute component that provides dnsmasq (DHCP, DNS, BOOTP, TFTP) and " -"radvd (routing) services." -msgstr "" -"dnsmasq (DHCP、DNS、BOOTP、TFTP) や radvd (ルーティング) のサービスを提供す" -"る Compute のコンポーネント。" - -msgid "" -"The Compute component that runs on each compute node and manages the VM " -"instance lifecycle, including run, reboot, terminate, attach/detach volumes, " -"and so on. Provided by the nova-compute daemon." -msgstr "" -"各ノードで動作し、仮想マシンインスタンスのライフサイクル (実行、再起動、終" -"了、ボリュームの接続や切断など) を管理する、Compute のコンポーネント。nova-" -"compute デーモンにより提供される。" - -msgid "" -"The Compute direct exchanges, fanout exchanges, and topic exchanges use this " -"key to determine how to process a message; processing varies depending on " -"exchange type." -msgstr "" -"Compute の直接交換、ファンアウト交換、トピック交換は、このキーを使用して、" -"メッセージを処理する方法を判断する。処理内容は交換形式に応じて変化する。" - -msgid "" -"The Compute scheduling method that attempts to fill a host with VMs rather " -"than starting new VMs on a variety of hosts." -msgstr "" -"様々なホスト上で新しい VM を起動するよりも、なるべく一つのホストを埋めようと" -"する Compute スケジューリング手法。" - -msgid "" -"The Compute service can send alerts through its notification system, which " -"includes a facility to create custom notification drivers. Alerts can be " -"sent to and displayed on the horizon dashboard." -msgstr "" -"Compute のサービスは、通知システム経由で警告を送信できる。カスタム通知ドライ" -"バーを作成する機能がある。警告は、送信したり、ダッシュボードに表示したりでき" -"る。" - -msgid "" -"The Compute service provides accounting information through the event " -"notification and system usage data facilities." -msgstr "" -"Compute サービスは、イベント通知やシステム使用状況データ機能からアカウンティ" -"ング情報を提供する。" - -msgid "The Compute setting that enables or disables RAM overcommitment." -msgstr "RAM オーバーコミットを有効化または無効化する Compute の設定。" - -msgid "" -"The Data processing service for OpenStack (sahara) aims to provide users " -"with a simple means to provision data processing (Hadoop, Spark) clusters by " -"specifying several parameters like Hadoop version, cluster topology, node " -"hardware details and a few more. After a user fills in all the parameters, " -"the Data processing service deploys the cluster in a few minutes. 
Sahara " -"also provides a means to scale already provisioned clusters by adding or " -"removing worker nodes on demand." -msgstr "" -"OpenStack 用 Data processing サービス (sahara) の目的は、 Hadoop バージョン、" -"クラスターのトポロジー、ノードハードウェアの詳細などのいくつかのパラメーター" -"を指定するだけで、(Hadoop や Spark の) データ処理クラスターをセットアップする" -"簡単な方法をユーザーに提供することです。ユーザーが必要なパラメーターをすべて" -"入力すると、 Data processing サービスは数分でクラスターを展開します。 要求に" -"応じてワーカーノードの追加、削除を行って、すでに展開済みのクラスターのサイズ" -"を変更する手段も提供しています。" - -msgid "The Database service includes the following components:" -msgstr "Database サービスは以下のコンポーネントで構成されます。" - -msgid "" -"The Database service provides resource isolation at high performance levels, " -"and automates complex administrative tasks such as deployment, " -"configuration, patching, backups, restores, and monitoring." -msgstr "" -"Database サービスは、高い性能レベルでのリソースの分離ができ、配備、設定、パッ" -"チ適用、バックアップ、リストア、監視などの複雑な管理作業を自動化します。" - -msgid "" -"The Database service provides scalable and reliable cloud provisioning " -"functionality for both relational and non-relational database engines. Users " -"can quickly and easily use database features without the burden of handling " -"complex administrative tasks. Cloud users and database administrators can " -"provision and manage multiple database instances as needed." -msgstr "" -"Database サービスは、リレーショナルデータベースと非リレーショナルデータベース" -"の両方のエンジン向けにスケール可能な信頼できるクラウド展開機能を提供します。" -"ユーザーは、複雑な管理作業を実行することなく、迅速かつ容易にデータベース機能" -"を利用できます。クラウドのユーザーとデータベース管理者は、必要に応じて、複数" -"のデータベースインスタンスを配備および管理できます。" - -msgid "The ID of an already existent volume." -msgstr "既存のボリュームの ID。" - -msgid "The Identity component that provides high-level authorization services." -msgstr "高レベルの認可サービスを提供する Identity のコンポーネント。" - -msgid "The Identity service component that provides authentication services." -msgstr "認証サービスを提供する Identity のコンポーネント。" - -msgid "The Identity service contains these components:" -msgstr "Identity サービスには、以下のコンポーネントがあります。" - -msgid "" -"The Identity service endpoint template that contains services available to " -"all tenants." -msgstr "" -"すべてのテナントが利用可能なサービスを含む、Identity のエンドポイントテンプ" -"レート。" - -msgid "The Image service API endpoint for management of VM images." -msgstr "仮想マシンイメージの管理用の Image service API エンドポイント。" - -msgid "The Launchpad Bugs area" -msgstr "Launchpad バグエリア" - -msgid "" -"The Network Controller provides virtual networks to enable compute servers " -"to interact with each other and with the public network. All machines must " -"have a public and private network interface. A VLAN network is a private " -"network interface, which is controlled by the ``vlan_interface`` option with " -"VLAN managers." -msgstr "" -"ネットワークコントローラーは、コンピュートサーバー間、およびコンピュートサー" -"バーとパブリックネットワークとの通信を行う仮想ネットワークを用意する。すべて" -"の物理マシンにはパブリック側とプライベート側のネットワークインタフェースが必" -"要。VLAN ネットワークはプライベート側のネットワークインタフェースで、VLAN マ" -"ネージャーの ``vlan_interface`` オプションで指定される。" - -msgid "" -"The Network Controller provides virtual networks to enable compute servers " -"to interact with each other and with the public network. All machines must " -"have a public and private network interface. A private network interface can " -"be a flat or VLAN network interface. A flat network interface is controlled " -"by the flat_interface with flat managers. A VLAN network interface is " -"controlled by the ``vlan_interface`` option with VLAN managers." 
-msgstr "" -"ネットワークコントローラーは、コンピュートサーバー間、およびコンピュートサー" -"バーとパブリックネットワークとの通信を行う仮想ネットワークを用意する。すべて" -"の物理マシンにはパブリック側とプライベート側のネットワークインタフェースが必" -"要。プライベートネットワークインターフェースは、フラットネットワークまたは " -"VLAN ネットワークインターフェースにできる。フラットネットワークインターフェー" -"スは、フラットマネージャーを用いて flat_interface により制御される。VLAN ネッ" -"トワークインターフェースは、 VLAN マネージャーの ``vlan_interface`` オプショ" -"ンにより制御される。" - -msgid "" -"The Network Controller provides virtual networks to enable compute servers " -"to interact with each other and with the public network. All machines must " -"have a public and private network interface. The public network interface is " -"controlled by the ``public_interface`` option." -msgstr "" -"コンピュートサーバーがパブリックネットワークと相互通信できるよう、ネットワー" -"クコントローラーが仮想ネットワークを提供する。全マシンにはパブリックとプライ" -"ベートのネットワークインターフェースがなければならない。パブリックネットワー" -"クインターフェースは ``public_interface`` オプションにより制御される。" - -msgid "" -"The Object Storage back-end process that creates and manages object replicas." -msgstr "" -"オブジェクトの複製を作成および管理する Object Storage のバックエンドプロセ" -"ス。" - -msgid "" -"The Object Storage component that provides container services, such as " -"create, delete, list, and so on." -msgstr "" -"作成、削除、一覧表示などのコンテナーサービスを提供する Object Storage のコン" -"ポーネント。" - -msgid "" -"The Object Storage context of an account. Do not confuse with a user account " -"from an authentication service, such as Active Directory, /etc/passwd, " -"OpenLDAP, OpenStack Identity, and so on." -msgstr "" -"Object Storage のアカウントのコンテキスト。Active Directory、/etc/passwd、" -"OpenLDAP、OpenStack Identity などの認証サービスのユーザーアカウントと混同しな" -"いこと。" - -msgid "" -"The OpenStack Administrator configures the basic infrastructure using the " -"following steps:" -msgstr "OpenStack の管理者は、以下の手順で、基本的なインフラを設定します。" - -msgid "" -"The OpenStack Block Storage service (cinder) adds persistent storage to a " -"virtual machine. Block Storage provides an infrastructure for managing " -"volumes, and interacts with OpenStack Compute to provide volumes for " -"instances. The service also enables management of volume snapshots, and " -"volume types." -msgstr "" -"OpenStack Block Storage サービス (cinder) は、永続ストレージを仮想マシンに提" -"供します。Block Storage は、ボリュームを管理するインフラを提供し、ボリューム" -"をインスタンスに提供するために OpenStack Compute と通信します。このサービスに" -"より、ボリュームのスナップショットやボリューム種別を管理できます。" - -msgid "The OpenStack IRC channel" -msgstr "OpenStack IRC チャンネル" - -msgid "The OpenStack Image service includes the following components:" -msgstr "OpenStack Image service には、以下のコンポーネントがあります。" - -msgid "" -"The OpenStack Image service is central to Infrastructure-as-a-Service (IaaS) " -"as shown in :ref:`get_started_conceptual_architecture`. It accepts API " -"requests for disk or server images, and metadata definitions from end users " -"or OpenStack Compute components. It also supports the storage of disk or " -"server images on various repository types, including OpenStack Object " -"Storage." -msgstr "" -"OpenStack Image service は、:ref:`get_started_conceptual_architecture` に書か" -"れているように Infrastructure-as-a-Service (IaaS) の中核です。エンドユーザー" -"や OpenStack Compute のコンポーネントからの、ディスクやサーバーイメージ、メタ" -"データ定義に関する API リクエストを受け付けます。ディスクやサーバーイメージの" -"保管場所として、OpenStack Object Storage などの様々な種類のリポジトリーに対応" -"しています。" - -msgid "" -"The OpenStack Object Storage is a multi-tenant object storage system. It is " -"highly scalable and can manage large amounts of unstructured data at low " -"cost through a RESTful HTTP API." 
-msgstr "" -"OpenStack Object Storage は、マルチテナントのオブジェクトストレージシステムで" -"す。高いスケーラビリティーを持ちます。RESTful HTTP API 経由で大規模な非構造" -"データを低コストで管理できます。" - -msgid "" -"The OpenStack community lives in the #openstack IRC channel on the Freenode " -"network. You can hang out, ask questions, or get immediate feedback for " -"urgent and pressing issues. To install an IRC client or use a browser-based " -"client, go to `https://webchat.freenode.net/ `__. You can also use Colloquy (Mac OS X, http://colloquy.info/), mIRC " -"(Windows, http://www.mirc.com/), or XChat (Linux). When you are in the IRC " -"channel and want to share code or command output, the generally accepted " -"method is to use a Paste Bin. The OpenStack project has one at http://paste." -"openstack.org. Just paste your longer amounts of text or logs in the web " -"form and you get a URL that you can paste into the channel. The OpenStack " -"IRC channel is ``#openstack`` on ``irc.freenode.net``. You can find a list " -"of all OpenStack IRC channels at https://wiki.openstack.org/wiki/IRC." -msgstr "" -"OpenStack コミュニティーは、Freenode ネットワークに #openstack IRC チャンネル" -"を持っています。緊急の問題に関して、ここに参加して、質問して、すぐにフィード" -"バックを得ることができます。IRC クライアントをインストールする場合、ブラウ" -"ザーベースのクライアントを使用する場合、`https://webchat.freenode.net/ " -"`__ にアクセスしてください。Colloquy (Mac OS " -"X, http://colloquy.info/)、mIRC (Windows, http://www.mirc.com/)、XChat " -"(Linux) を使用することもできます。IRC チャンネルに参加して、コードやコマンド" -"の出力を共有したい場合、一般的に Paste Bin を使用する方法があります。" -"OpenStack プロジェクトには http://paste.openstack.org があります。長いテキス" -"トやログをウェブフォームに貼り付けるだけです。そして、URL を取得して IRC チャ" -"ンネルに貼り付けます。OpenStack の IRC チャンネルは ``irc.freenode.net`` にあ" -"る ``#openstack`` です。OpenStack の IRC チャンネル一覧は https://wiki." -"openstack.org/wiki/IRC にあります。" - -msgid "" -"The OpenStack community values your set up and testing efforts and wants " -"your feedback. To log a bug, you must sign up for a Launchpad account at " -"https://launchpad.net/+login. You can view existing bugs and report bugs in " -"the Launchpad Bugs area. Use the search feature to determine whether the bug " -"has already been reported or already been fixed. If it still seems like your " -"bug is unreported, fill out a bug report." -msgstr "" -"OpenStack コミュニティーは、あなたの構成やテスト作業を重視しており、あなたの" -"フィードバックを期待しています。バグを報告する場合、 https://launchpad.net/" -"+login で Launchpad アカウントをサインアップする必要があります。既存のバグを" -"確認したり、Launchpad バグエリアにバグを報告したりできます。検索機能を使用し" -"て、バグがすでに報告されているか、すでに解決されているかを確認できます。バグ" -"がまだ報告されていないようであれば、バグ報告を記入します。" - -msgid "" -"The OpenStack configuration files use an INI format to describe options and " -"their values. It consists of sections and key value pairs." -msgstr "" -"OpenStack 設定ファイルは、オプションやその値を記述するために、INI 形式を使用" -"する。セクションとキーバリューペアから構成される。" - -msgid "" -"The OpenStack core project that enables management of volumes, volume " -"snapshots, and volume types. The project name of Block Storage is cinder." -msgstr "" -"ボリューム、ボリュームのスナップショット、ボリューム種別を管理する、" -"OpenStack のコアプロジェクト。Block Storage のプロジェクト名は cinder。" - -msgid "" -"The OpenStack core project that provides a central directory of users mapped " -"to the OpenStack services they can access. It also registers endpoints for " -"OpenStack services. It acts as a common authentication system. The project " -"name of Identity is keystone." -msgstr "" -"ユーザーがアクセスできる OpenStack サービスに対応付けられた、ユーザーの中央" -"ディレクトリーを提供する、OpenStack コアプロジェクト。OpenStack サービスのエ" -"ンドポイントも登録する。一般的な認証システムとして動作する。Identity のプロ" -"ジェクト名は keystone。" - -msgid "" -"The OpenStack core project that provides compute services. The project name " -"of Compute service is nova." 
-msgstr "" -"コンピュートサービスを提供する OpenStack のコアプロジェクト。Compute のプロ" -"ジェクト名は nova。" - -msgid "" -"The OpenStack core project that provides eventually consistent and redundant " -"storage and retrieval of fixed digital content. The project name of " -"OpenStack Object Storage is swift." -msgstr "" -"結果整合性(eventually consistent)、ストレージ冗長化、静的デジタルコンテンツ" -"取得、といった機能を提供する、OpenStack のコアプロジェクト。OpenStack Object " -"Storage のプロジェクト名は swift。" - -msgid "" -"The OpenStack dashboard by default on Ubuntu installs the ``openstack-" -"dashboard-ubuntu-theme`` package." -msgstr "" -"Ubuntu の OpenStack dashboard は、デフォルトで ``openstack-dashboard-ubuntu-" -"theme`` パッケージをインストールします。" - -msgid "" -"The OpenStack dashboard is a modular `Django web application `__ that provides a graphical interface to OpenStack " -"services." -msgstr "" -"OpenStack dashboard は、モジュール型の `Django Web アプリケーション`__ です。OpenStack サービスのグラフィカルインター" -"フェースを提供します。" - -msgid "The OpenStack documentation uses several typesetting conventions." -msgstr "OpenStack のドキュメントでは、以下の表記規則を採用しています。" - -msgid "" -"The OpenStack end user deploys the Database service using the following " -"steps:" -msgstr "" -"OpenStack のエンドユーザーは、以下の手順で Database サービスを配備します。" - -msgid "" -"The OpenStack project is an open source cloud computing platform for all " -"types of clouds, which aims to be simple to implement, massively scalable, " -"and feature rich. Developers and cloud computing technologists from around " -"the world create the OpenStack project." -msgstr "" -"OpenStack project はあらゆる種類のクラウド向けのオープンソースのクラウドコン" -"ピューティングプラットフォームです。シンプルな実装、大規模なスケーラビリ" -"ティ、豊富な機能を目指しています。世界中の開発者とクラウドコンピューティング" -"技術者が OpenStack project を作っています。" - -msgid "" -"The OpenStack project that OpenStack project that implements clustering " -"services and libraries for the management of groups of homogeneous objects " -"exposed by other OpenStack services. The project name of Clustering service " -"is senlin." -msgstr "" -"クラスタリングサービスと、他の OpenStack サービスにより公開された均質なオブ" -"ジェクトグループを管理するためのライブラリーを実現する OpenStack プロジェク" -"ト。このプロジェクトのコード名は senlin。" - -msgid "" -"The OpenStack project that provides a multi-tenant, highly scalable, " -"performant, fault-tolerant Monitoring-as-a-Service solution for metrics, " -"complex event processing, and logging. It builds an extensible platform for " -"advanced monitoring services that can be used by both operators and tenants " -"to gain operational insight and visibility, ensuring availability and " -"stability. The project name is monasca." -msgstr "" -"マルチテナントで、高いスケーラビリティーを持ち、高性能で、耐障害性のある、" -"Monitoring-as-a-Service ソリューションを提供する OpenStack プロジェクト。 計" -"測情報、複合イベント処理 (complex event processing)、ログ監視が対象。オペレー" -"ター、テナントの両者が利用できる、高度なモニタリングサービスに対応できる拡張" -"性のあるプラットフォームを開発しており、可用性と安定性を確保しながら、運用上" -"の問題の特定や可視化を実現できる。プロジェクト名は monasca。" - -msgid "" -"The OpenStack project that provides integrated tooling for backing up, " -"restoring, and recovering file systems, instances, or database backups. The " -"project name is freezer." 
-msgstr "" -"ファイルシステム、インスタンス、データベースバックアップのバックアップ、リス" -"トア、リカバリー用の統合ツールを提供する OpenStack プロジェクト。プロジェクト" -"名は freezer。" - -msgid "The OpenStack stack uses the following storage types:" -msgstr "OpenStack のスタックは、以下のストレージ種別を使用します。" - -msgid "The OpenStack wiki" -msgstr "OpenStack wiki" - -msgid "The Orchestration service consists of the following components:" -msgstr "Orchestration サービスは、以下のコンポーネントから構成されます。" - -msgid "" -"The Orchestration service provides a template-based orchestration for " -"describing a cloud application by running OpenStack API calls to generate " -"running cloud applications. The software integrates other core components of " -"OpenStack into a one-file template system. The templates allow you to create " -"most OpenStack resource types such as instances, floating IPs, volumes, " -"security groups, and users. It also provides advanced functionality such as " -"instance high availability, instance auto-scaling, and nested stacks. This " -"enables OpenStack core projects to receive a larger user base." -msgstr "" -"Orchestration サービスは、クラウドアプリケーションを記述できるテンプレート" -"ベースのオーケストレーション機能を提供します。 OpenStack API を呼び出して、実" -"際に動くクラウドアプリケーションを生成します。このサービスにより、他の " -"OpenStack のコアコンポーネントを 1ファイルのテンプレートシステムに統合できま" -"す。テンプレートを使って、ほとんどの種類の OpenStack リソースを作成できます。" -"例えば、インスタンス、 Floating IP、ボリューム、セキュリティーグループ、ユー" -"ザーなどです。インスタンスの高可用化、インスタンスのオートスケーリング、入れ" -"子になったスタックといった、高度な機能も提供しています。これにより、より多く" -"のユーザーが OpenStack コアプロジェクトを利用することになります。" - -msgid "The POSIX-compliant file system provided by Ceph." -msgstr "Ceph により提供される POSIX 互換ファイルシステム。" - -msgid "" -"The SCSI disk protocol tunneled within Ethernet, supported by Compute, " -"Object Storage, and Image service." -msgstr "" -"イーサネット内でトンネルされる SCSI ディスクプロトコル。Compute、Object " -"Storage、Image service によりサポートされる。" - -msgid "The Shared File Systems service consists of the following components:" -msgstr "Shared File Systems サービスは、以下のコンポーネントから構成されます。" - -msgid "" -"The Simple Protocol for Independent Computing Environments (SPICE) provides " -"remote desktop access to guest virtual machines. It is an alternative to " -"VNC. SPICE is supported by OpenStack." -msgstr "" -"Simple Protocol for Independent Computing Environments (SPICE) は、ゲスト仮想" -"マシンに対するリモートデスクトップアクセスを提供する。VNC の代替品。SPICE は " -"OpenStack によりサポートされる。" - -msgid "The Telemetry Alarming service consists of the following components:" -msgstr "Telemetry Alarming サービスは、以下のコンポーネントから構成されます。" - -msgid "" -"The Telemetry Alarming services trigger alarms when the collected metering " -"or event data break the defined rules." -msgstr "" -"Telemetry Alarming サービスは、収集したメーターやイベントデータが定義済みルー" -"ルを満たさない場合、アラームを生成します。" - -msgid "The Telemetry Data Collection services provide the following functions:" -msgstr "Telemetry Data Collection サービスは、以下の機能を持ちます。" - -msgid "The Telemetry service consists of the following components:" -msgstr "Telemetry サービスは、以下のコンポーネントから構成されます。" - -msgid "" -"The URL where the data for this image resides. For example, if the image " -"data is stored in swift, you could specify ``swift://account:key@example.com/" -"container/obj``." -msgstr "" -"このイメージのデータが格納されている URL。たとえば、イメージデータが swift に" -"格納されている場合 ``swift://account:key@example.com/container/obj`` を指定し" -"ます。 " - -msgid "The Xen administrative API, which is supported by Compute." -msgstr "Xen 管理 API。Compute によりサポートされる。" - -msgid "" -"The `OpenStack wiki `__ contains a broad range " -"of topics but some of the information can be difficult to find or is a few " -"pages deep. 
Fortunately, the wiki search feature enables you to search by " -"title or content. If you search for specific information, such as about " -"networking or OpenStack Compute, you can find a large amount of relevant " -"material. More is being added all the time, so be sure to check back often. " -"You can find the search box in the upper-right corner of any OpenStack wiki " -"page." -msgstr "" -"`OpenStack wiki `__ には、さまざまな情報がありま" -"す。しかし、いくつかの情報は見つけにくく、深い場所にあります。幸い、wiki 検索" -"機能により、見出しや内容を検索できます。ネットワークや OpenStack Compute な" -"ど、特定の情報を探している場合、非常に多くの関連項目を見つけられます。情報が" -"いつでも追加されているので、ときどき確認し直してください。すべての OpenStack " -"wiki ページの右上に、検索ボックスがあります。" - -msgid "" -"The ``cinder-backup`` service provides backing up volumes of any type to a " -"backup storage provider. Like the ``cinder-volume`` service, it can interact " -"with a variety of storage providers through a driver architecture." -msgstr "" -"``cinder-backup`` サービスは、あらゆる種類のボリュームのバックアップをバック" -"アップストレージプロバイダーに提供します。 ``cinder-volume`` サービスのよう" -"に、ドライバーアーキテクチャーになっており、さまざまな種類のストレージプロバ" -"イダーを利用できます。" - -msgid "" -"The ``cpu`` column shows the sum of the virtual CPUs for instances running " -"on the host." -msgstr "" -"``cpu`` の列は、ホストで実行中のインスタンスの仮想 CPU 総数を表示します。" - -msgid "" -"The ``disk_gb`` column shows the sum of the root and ephemeral disk sizes " -"(in GB) of the instances that run on the host." -msgstr "" -"``disk_gb`` の列は、ホストで実行中のインスタンスの一時ディスクのサイズ (GB) " -"と root の合計を表示します。" - -msgid "" -"The ``memory_mb`` column shows the sum of the memory (in MB) allocated to " -"the instances that run on the host." -msgstr "" -"``memory_mb`` の列は、ホストで実行中のインスタンスに割り当てられたメモリー " -"(MB) 合計を表示します。" - -msgid "" -"The ``nova-network`` worker daemon; provides services such as giving an IP " -"address to a booting nova instance." -msgstr "" -"``nova-network`` ワーカーデーモン。起動中の Nova インスタンスに IP アドレスを" -"付与する等のサービスを提供する。" - -msgid "" -"The ``root`` user must run commands that are prefixed with the ``#`` prompt. " -"You can also prefix these commands with the :command:`sudo` command, if " -"available, to run them." -msgstr "" -"``#`` プロンプトから始まるコマンドは ``root`` ユーザーで実行する必要がありま" -"す。これらのコマンドを実行するために :command:`sudo` コマンドを使用することも" -"できます。" - -msgid "" -"The ability to encrypt data at the file system, disk partition, or whole-" -"disk level. Supported within Compute VMs." -msgstr "" -"ファイルシステム、ディスクパーティション、ディスク全体を暗号化する機能。" -"Compute の仮想マシン内でサポートされる。" - -msgid "" -"The ability to start new VM instances based on the actual memory usage of a " -"host, as opposed to basing the decision on the amount of RAM each running " -"instance thinks it has available. Also known as RAM overcommit." -msgstr "" -"実行中の各インスタンスが利用可能と考えている RAM 量に基づく判断をベースにする" -"代わりに、ホスト上の実際のメモリ使用量をベースにした、新しい VM インスタンス" -"を起動する機能。" - -msgid "" -"The ability to start new VM instances based on the actual memory usage of a " -"host, as opposed to basing the decision on the amount of RAM each running " -"instance thinks it has available. Also known as memory overcommit." -msgstr "" -"実行中の各インスタンスが利用可能と考えている RAM 量に基づく判断をベースにする" -"代わりに、ホスト上の実際のメモリ使用量をベースにした、新しい VM インスタンス" -"を起動する機能。" - -msgid "" -"The ability within Compute to move running virtual machine instances from " -"one host to another with only a small service interruption during switchover." -msgstr "" -"切り替え中のわずかなサービス中断のみで、実行中の仮想マシンをあるホストから別" -"のホストに移動する、Compute 内の機能。" - -msgid "" -"The act of verifying that a user, process, or client is authorized to " -"perform an action." 
-msgstr "" -"ユーザー、プロセス、クライアントが操作を実行する権限を持つかどうかを確認する" -"こと。" - -msgid "" -"The amount of available data used by communication resources, such as the " -"Internet. Represents the amount of data that is used to download things or " -"the amount of data available to download." -msgstr "" -"インターネットなどの通信リソースにより使用される、利用可能なデータ量。何かを" -"ダウンロードするために使用されるデータの合計量、またはダウンロードするために" -"利用可能なデータの合計量を表す。" - -msgid "" -"The amount of time it takes for a new Object Storage object to become " -"accessible to all clients." -msgstr "" -"Object Storage の新規オブジェクトがすべてのクライアントからアクセス可能になる" -"までにかかる時間。" - -msgid "" -"The association between an Image service VM image and a tenant. Enables " -"images to be shared with specified tenants." -msgstr "" -"Image service の仮想マシンイメージとテナント間の関連。イメージを特別なテナン" -"トと共有できるようになる。" - -msgid "" -"The back-end store used by Image service to store VM images, options include " -"Object Storage, local file system, S3, or HTTP." -msgstr "" -"仮想マシンイメージを保存するために、Image service により使用されるバックエン" -"ドストア。オプションとして、Object Storage、ローカルファイルシステム、S3、" -"HTTP がある。" - -msgid "" -"The cloud operator assigns roles to users. Roles determine who can upload " -"and manage images. The operator might restrict image upload and management " -"to only cloud administrators or operators." -msgstr "" -"クラウド運用者はユーザーにロールを割り当てます。ロールはイメージをアップロー" -"ドおよび管理できるユーザーを決定します。運用者はイメージのアップロードと管理" -"をクラウド管理者や運用者のみに制限するかもしれません。" - -msgid "" -"The code name for the eighth release of OpenStack. The design summit took " -"place in Portland, Oregon, US and Havana is an unincorporated community in " -"Oregon." -msgstr "" -"OpenStack の 8 番目のリリースのコード名。デザインサミットがアメリカ合衆国オレ" -"ゴン州ポートランドで開催された。Havana は、オレゴン州の非法人コミュニティーで" -"ある。" - -msgid "" -"The code name for the eleventh release of OpenStack. The design summit took " -"place in Paris, France. Due to delays in the name selection, the release was " -"known only as K. Because ``k`` is the unit symbol for kilo and the reference " -"artifact is stored near Paris in the Pavillon de Breteuil in Sèvres, the " -"community chose Kilo as the release name." -msgstr "" -"OpenStack の 11 番目のリリースのコード名。デザインサミットは、フランスのパリ" -"で開催された。名前選定の遅れにより、このリリースは K のみで知られていた。 " -"``k`` はキロを表す単位記号であり、その原器がパリ近郊の Pavillon de Breteuil " -"in Sèvres に保存されているので、コミュニティーはリリース名として Kilo を選択" -"した。" - -msgid "" -"The code name for the fifteenth release of OpenStack. The design summit will " -"take place in Barcelona, Spain. Ocata is a beach north of Barcelona." -msgstr "" -"OpenStack の 14 番目のリリースのコード名。デザインサミットは、スペインのバル" -"セロナで開催される。Ocata はバルセロナ北部のビーチ。" - -msgid "" -"The code name for the initial release of OpenStack. The first design summit " -"took place in Austin, Texas, US." -msgstr "" -"OpenStack の初期リリースのコード名。最初のデザインサミットは、アメリカ合衆国" -"テキサス州オースチンで開催された。" - -msgid "" -"The code name for the ninth release of OpenStack. The design summit took " -"place in Hong Kong and Ice House is a street in that city." -msgstr "" -"OpenStack の 9 番目のリリースのコード名。デザインサミットは、香港で開催され" -"た。Ice House は、その近くにある通りである。" - -msgid "" -"The code name for the seventh release of OpenStack. The design summit took " -"place in San Diego, California, US and Grizzly is an element of the state " -"flag of California." -msgstr "" -"OpenStack の 7 番目のリリースのコード名。デザインサミットがアメリカ合衆国カリ" -"フォルニア州サンディエゴで開催された。Grizzly は、カリフォルニア州の州旗に使" -"われている。" - -msgid "" -"The code name for the tenth release of OpenStack. The design summit took " -"place in Atlanta, Georgia, US and Juno is an unincorporated community in " -"Georgia." 
-msgstr "" -"OpenStack の 10 番目のリリースのコード名。デザインサミットはアメリカ合衆国" -"ジョージア州アトランタにて開催された。Juno は、ジョージア州の非公式コミュニ" -"ティー。" - -msgid "" -"The code name for the thirteenth release of OpenStack. The design summit " -"took place in Tokyo, Japan. Mitaka is a city in Tokyo." -msgstr "" -"OpenStack の 13 番目のリリースのコード名。デザインサミットは、日本の東京で開" -"催された。三鷹は、東京にある都市です。" - -msgid "" -"The code name for the twelfth release of OpenStack. The design summit took " -"place in Vancouver, Canada and Liberty is the name of a village in the " -"Canadian province of Saskatchewan." -msgstr "" -"OpenStack の 12 番目のリリースのコード名。デザインサミットは、カナダのバン" -"クーバーで開催された。Liberty は、サスカチュワン州にある村の名前。" - -msgid "The collaboration site for OpenStack." -msgstr "OpenStack 用コラボレーションサイト。" - -msgid "" -"The common agents are L3 (layer 3), DHCP (dynamic host IP addressing), and a " -"plug-in agent." -msgstr "" -"共通のエージェントは、L3 エージェント、DHCP エージェント、プラグインエージェ" -"ントです。" - -msgid "" -"The container format of the image. Acceptable formats are ami, ari, aki, " -"bare, docker, and ovf." -msgstr "" -"イメージのコンテナー形式。対応形式は ami、ari、aki、docker、bare、ovf です。" - -msgid "" -"The cooperative threading model used by Python; reduces race conditions and " -"only context switches when specific library calls are made. Each OpenStack " -"service is its own thread." -msgstr "" -"Python により使用される協調スレッドモデル。特定のライブラリーコールが発行され" -"るときの競合状態とコンテキストスイッチを減らす。各 OpenStack サービスは自身の" -"スレッドである。" - -msgid "The current state of a guest VM image." -msgstr "ゲスト仮想マシンイメージの現在の状態。" - -msgid "" -"The current status of a VM image in Image service, not to be confused with " -"the status of a running instance." -msgstr "" -"Image service における仮想マシンイメージの現在の状態。実行中のインスタンスの" -"状態と混同しないこと。" - -msgid "" -"The daemon, worker, or service that a client communicates with to access an " -"API. API endpoints can provide any number of services, such as " -"authentication, sales data, performance meters, Compute VM commands, census " -"data, and so on." -msgstr "" -"クライアントが API にアクセスするために通信するデーモン、ワーカーまたはサービ" -"ス。API エンドポイントは、認証、売上データ、パフォーマンス統計、Compute 仮想" -"マシンコマンド、センサスデータなどのような数多くのサービスを提供できます。" - -msgid "" -"The dashboard is usually deployed through `mod_wsgi `__ in Apache. You can modify the dashboard code to make it " -"suitable for different sites." -msgstr "" -"ダッシュボードは、一般的に Apache の `mod_wsgi `__ 経由で配備されます。ダッシュボードのコードを修正して、それぞれの" -"サイトに適するように変更できます。" - -msgid "The default message queue software used by OpenStack." -msgstr "OpenStackでデフォルトで採用されているメッセージキューのソフトウェア。" - -msgid "" -"The default panel that is displayed when a user accesses the horizon " -"dashboard." -msgstr "" -"ユーザーがダッシュボードにアクセスした際に表示されるデフォルトのパネル。" - -msgid "" -"The disk format of the image. Acceptable formats are ami, ari, aki, vhd, " -"vmdk, raw, qcow2, vdi, and iso." -msgstr "" -"イメージのディスク形式。利用可能な形式は ami、ari、aki、vhd、vmdk、raw、" -"qcow2、vdi、iso です。" - -msgid "The fibre channel protocol tunneled within Ethernet." 
-msgstr "イーサネットでトンネルされるファイバーチャネルプロトコル。" - -msgid "The following Launchpad Bugs areas are available:" -msgstr "以下の Launchpad バグエリアが利用できます。" - -msgid "" -"The following Linux distributions provide community-supported packages for " -"OpenStack:" -msgstr "OpenStack のコミュニティサポート版を提供しているディストリビューション" - -msgid "" -"The following books explain how to configure and run an OpenStack cloud:" -msgstr "OpenStackクラウドの設定と実行に関するガイド:" - -msgid "" -"The following books explain how to install an OpenStack cloud and its " -"associated components:" -msgstr "OpenStack クラウドと関連コンポーネントの導入ガイド:" - -msgid "" -"The following books explain how to use the OpenStack dashboard and command-" -"line clients:" -msgstr "" -"以下のドキュメントは、OpenStack dashboard とコマンドラインクライアントの使用" -"方法を説明しています。" - -msgid "The following can easily be customized:" -msgstr "以下の項目を簡単にカスタマイズできます。" - -msgid "" -"The following diagram shows the most common, but not the only possible, " -"architecture for an OpenStack cloud:" -msgstr "" -"以下の図に、最も一般的な OpenStack クラウドのアーキテクチャーを示します。これ" -"が唯一の OpenStack のアーキテクチャーというわけではありません。" - -msgid "" -"The following diagram shows the relationships among the OpenStack services:" -msgstr "以下の図は OpenStack サービス間の関連性を示しています。" - -msgid "" -"The following documentation provides reference and guidance information for " -"the OpenStack APIs:" -msgstr "" -"以下のドキュメントは、OpenStack API に関するリファレンスとガイダンスを提供し" -"ます。" - -msgid "" -"The following example shows how to update an existing image with a " -"properties that describe the disk bus, the CD-ROM bus, and the VIF model:" -msgstr "" -"以下の例は、ディスクバス、CD-ROM バス、VIF モデルのプロパティを指定して、既存" -"のイメージを更新する方法を表します。" - -msgid "" -"The following example shows the command for installing the OpenStack client " -"with ``pip``, which supports multiple services." -msgstr "" -"以下の例は、``pip`` を用いて OpenStack client をインストールするコマンドを示" -"します。" - -msgid "" -"The following example shows the command that you would use to upload a " -"CentOS 6.3 image in qcow2 format and configure it for public access:" -msgstr "" -"以下の例は、CentOS 6.3 イメージを qcow2 形式でアップロードし、パブリックなア" -"クセス用に設定するために使用するコマンドを表します。" - -msgid "The following example unmanages the ``my-snapshot-id`` image:" -msgstr "以下の例は ``my-snapshot-id`` イメージを管理対象外にします。" - -msgid "" -"The following examples show the host usage statistics for a host called " -"``devstack``." -msgstr "以下の例では、``devstack`` という名前のホストの使用統計を表示します。" - -msgid "" -"The following guide provides how to contribute to OpenStack documentation:" -msgstr "" -"OpenStack ドキュメントに貢献する方法については以下のガイドに説明があります。" - -msgid "" -"The following individual clients are deprecated in favor of a common client. " -"Instead of installing and learning all these clients, we recommend " -"installing and using the OpenStack client. You may need to install an " -"individual project's client because coverage is not yet sufficient in the " -"OpenStack client. If you need to install an individual client's project, " -"replace the ```` name in this ``pip install`` command using the " -"list below." -msgstr "" -"以下の個別クライアントは、共通クライアントに置き換えられ、非推奨になりまし" -"た。これらのクライアントをすべてインストールして学習する代わりに、OpenStack " -"クライアントをインストールして使用することを推奨します。OpenStack クライアン" -"トにおいて十分にカバーされていないため、各プロジェクトのクライアントをインス" -"トールする必要があるかもしれません。各プロジェクトのクライアントをインストー" -"ルする必要がある場合、以下の一覧を使用して、この ``pip install`` の " -"```` の名前を置き換えてください。" - -msgid "" -"The following list explains the optional arguments that you can use with the " -"``create`` and ``update`` commands to modify image properties. 
For more "
-"information, refer to Image service chapter in the `OpenStack Command-Line "
-"Interface Reference `__."
-msgstr ""
-"以下の一覧は、イメージのプロパティを変更するために、``create`` コマンドと "
-"``update`` コマンドで使用できるオプション引数の一覧です。詳細は `OpenStack "
-"Command-Line Interface Reference `__ の Image service の章を参照してください。"
-
-msgid ""
-"The following resources are available to help you run and use OpenStack. The "
-"OpenStack community constantly improves and adds to the main features of "
-"OpenStack, but if you have any questions, do not hesitate to ask. Use the "
-"following resources to get OpenStack support, and troubleshoot your "
-"installations."
-msgstr ""
-"OpenStack の利用に役立つリソースとして以下のものがあります。OpenStack コミュ"
-"ニティーは、OpenStack を継続的に改善、機能追加していますが、もしあなたが何ら"
-"かの疑問に直面したら、遠慮せずに相談してください。下記のリソースを OpenStack "
-"のサポートとトラブルシュートに活用してください。"
-
-msgid ""
-"The following table describes the OpenStack services that make up the "
-"OpenStack architecture:"
-msgstr ""
-"以下の表は OpenStack アーキテクチャーを構成する OpenStack のサービスについて"
-"まとめたものです。"
-
-msgid ""
-"The following table lists the command-line client for each OpenStack service "
-"with its package name and description."
-msgstr ""
-"以下の表は、各 OpenStack サービスのコマンドラインクライアント、そのパッケージ"
-"名、説明の一覧です。"
-
-msgid ""
-"The logo also acts as a hyperlink. The default behavior is to redirect to "
-"``horizon:user_home``. To change this, add the following attribute to "
-"``local_settings.py``:"
-msgstr ""
-"ロゴはハイパーリンクとしても機能します。デフォルトの動作では、``horizon:"
-"user_home`` にリダイレクトします。これを変更するには、以下の属性を "
-"``local_settings.py`` に追加します。"
-
-msgid ""
-"The main virtual communication line used by all AMQP messages for inter-"
-"cloud communications within Compute."
-msgstr ""
-"Compute 内でクラウド間通信のためにすべての AMQP メッセージにより使用されるメ"
-"インの仮想通信ライン。"
-
-msgid ""
-"The method of storage used by horizon to track client sessions, such as "
-"local memory, cookies, a database, or memcached."
-msgstr ""
-"クライアントのセッションを管理するために、horizon により使用される保存方法。"
-"ローカルメモリー、クッキー、データベース、memcached など。"
-
-msgid ""
-"The method that a service uses for persistent storage, such as iSCSI, NFS, "
-"or local disk."
-msgstr ""
-"サービスが、iSCSI、NFS、ローカルディスクなどの永続ストレージを使用する方式。"
-
-msgid ""
-"The method used by the Compute RabbitMQ for intra-service communications."
-msgstr "内部サービス通信のために Compute RabbitMQ により使用される方法。"
-
-msgid "The minimum amount of RAM needed to boot the image, in megabytes."
-msgstr "イメージをブートするのに必要となるメモリーの最小容量。メガバイト単位。"
-
-msgid "The minimum size of the disk needed to boot the image, in gigabytes."
-msgstr "イメージをブートするのに必要となるディスクの最小容量。ギガバイト単位。"
-
-msgid "The most common web server software currently used on the Internet."
-msgstr ""
-"現在インターネットにおいて使用されている最も一般的な Web サーバーソフトウェ"
-"ア。"
-
-msgid "The name of the image."
-msgstr "イメージの名前。"
-
-msgid ""
-"The nova-api daemon provides access to nova services. Can communicate with "
-"other APIs, such as the Amazon EC2 API."
-msgstr ""
-"Compute サービスへのアクセスを提供する nova-api デーモン。Amazon EC2 API のよ"
-"うな他の API でも通信可能。"
-
-msgid "The number of replicas of the data in an Object Storage ring."
-msgstr "Object Storage リングにおけるデータ複製数。"
-
-msgid ""
-"The open standard messaging protocol used by OpenStack components for intra-"
-"service communications, provided by RabbitMQ, Qpid, or ZeroMQ."
-msgstr ""
-"内部サービス通信のために OpenStack コンポーネントにより使用されるオープンな標"
-"準メッセージングプロトコル。RabbitMQ、Qpid、ZeroMQ により提供される。"
-
-msgid ""
-"The output shows that the volume is attached to the server with ID "
-"``84c6e57d-a6b1-44b6-81eb-fcb36afd31b5``, is in the nova availability zone, "
-"and is bootable."
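A hedged sketch of the image commands and properties covered by the entries above (the CentOS 6.3 qcow2 upload example, and the minimum RAM and disk requirements in megabytes and gigabytes respectively); the image name and file path are placeholders:

.. code-block:: console

   $ glance image-create --name centos63-image --disk-format qcow2 \
     --container-format bare --is-public True --file ./centos63.qcow2
   $ glance image-update --min-ram 512 --min-disk 5 centos63-image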
-msgstr "" -"この出力から、このボリュームは ID が ``84c6e57d-a6b1-44b6-81eb-" -"fcb36afd31b5`` のサーバーに接続されていること、nova のアベイラビリティゾーン" -"内にあること、ブータブルであることが分かります。" - -msgid "" -"The output shows the volume transfer ID in the ``id`` row and the " -"authorization key." -msgstr "" -"この出力では、認証キーと、``id`` の行にボリュームの転送 ID が表示されます。" - -msgid "" -"The persistent data store used to save and retrieve information for a " -"service, such as lists of Object Storage objects, current state of guest " -"VMs, lists of user names, and so on. Also, the method that the Image service " -"uses to get and store VM images. Options include Object Storage, local file " -"system, S3, and HTTP." -msgstr "" -"Object Storage のオブジェクトの一覧、ゲスト仮想マシンの現在の状態、ユーザー名" -"の一覧など、サービスに関する情報を保存および取得するために使用される永続デー" -"タストア。また、Image service が仮想マシンイメージを取得および保存するために" -"使用する方式。Object Storage、ローカルファイルシステム、S3、HTTP などの選択肢" -"がある。" - -msgid "" -"The person responsible for installing, configuring, and managing an " -"OpenStack cloud." -msgstr "OpenStack クラウドのインストール、設定、管理に責任を持つ人。" - -msgid "" -"The person responsible for planning and maintaining an OpenStack " -"installation." -msgstr "OpenStack インストールを計画し、管理する責任者。" - -msgid "" -"The point where a user interacts with a service; can be an API endpoint, the " -"horizon dashboard, or a command-line tool." -msgstr "" -"ユーザーがサービスと通信する箇所。API エンドポイント、ダッシュボード、コマン" -"ドラインツールの可能性がある。" - -msgid "" -"The practice of placing one packet type within another for the purposes of " -"abstracting or securing data. Examples include GRE, MPLS, or IPsec." -msgstr "" -"データを抽象化やセキュア化する目的で、あるパケット形式を別の形式の中に入れる" -"ための方法。例えば、GRE、MPLS、IPsec などがある。" - -msgid "" -"The practice of utilizing a secondary environment to elastically build " -"instances on-demand when the primary environment is resource constrained." -msgstr "" -"主環境がリソース制限されたとき、要求時に応じてインスタンスを伸縮自在に構築す" -"るために、副環境を利用する慣習。" - -msgid "" -"The primary load balancing configuration object. Specifies the virtual IP " -"address and port where client traffic is received. Also defines other " -"details such as the load balancing method to be used, protocol, and so on. " -"This entity is sometimes known in load-balancing products as a virtual " -"server, vserver, or listener." -msgstr "" -"主たる負荷分散の設定オブジェクト。クライアント通信を受け付ける仮想 IP とポー" -"トを指定する。使用する負荷分散方式、プロトコルなどの詳細も定義する。このエン" -"ティティは、virtual server、vserver、listener のような負荷分散製品においても" -"知られている。" - -msgid "" -"The procedure for volume transfer is intended for tenants (both the volume " -"donor and recipient) within the same cloud." -msgstr "" -"ボリューム転送の手続きは、テナント (ボリュームの譲渡元と受信者) が同じクラウ" -"ド内にあることを意図しています。" - -msgid "" -"The process associating a Compute floating IP address with a fixed IP " -"address." -msgstr "" -"Compute の Floating IP アドレスと Fixed IP アドレスを関連づけるプロセス。" - -msgid "" -"The process of automating IP address allocation, deallocation, and " -"management. Currently provided by Compute, melange, and Networking." -msgstr "" -"IP アドレスの割り当て、割り当て解除、管理を自動化するプロセス。現在、" -"Compute、melange、Networking により提供される。" - -msgid "" -"The process of connecting a VIF or vNIC to a L2 network in Networking. In " -"the context of Compute, this process connects a storage volume to an " -"instance." -msgstr "" -"Networking において、仮想インターフェースや仮想 NIC を L2 ネットワークに接続" -"するプロセス。Compute の文脈では、ストレージボリュームをインスタンスに接続す" -"るプロセス。" - -msgid "" -"The process of copying data to a separate physical device for fault " -"tolerance and performance." 
-msgstr "" -"別の物理デバイスにデータをコピーする処理。耐障害性や性能のために行われる。" - -msgid "" -"The process of distributing Object Storage partitions across all drives in " -"the ring; used during initial ring creation and after ring reconfiguration." -msgstr "" -"リング内のすべてのドライブにわたり、Object Storage のパーティションを分散させ" -"る処理。初期リング作成中、リング再設定後に使用される。" - -msgid "" -"The process of filtering incoming network traffic. Supported by Compute." -msgstr "" -"入力ネットワーク通信をフィルタリングする処理。Compute によりサポートされる。" - -msgid "" -"The process of finding duplicate data at the disk block, file, and/or object " -"level to minimize storage use—currently unsupported within OpenStack." -msgstr "" -"ディスク使用を最小化するために、ディスクブロック、ファイル、オブジェクトレベ" -"ルにあるデータの重複を見つけるプロセス。現在 OpenStack 内では未サポート。" - -msgid "" -"The process of migrating one or all virtual machine (VM) instances from one " -"host to another, compatible with both shared storage live migration and " -"block migration." -msgstr "" -"1つまたは全ての仮想マシン(VM)インスタンスをあるホストから別のホストにマイ" -"グレーションする処理。共有ストレージのライブマイグレーションとブロックマイグ" -"レーション両方と互換がある。" - -msgid "The process of moving a VM instance from one host to another." -msgstr "VM インスタンスをあるホストから別のホストに移動させる処理。" - -msgid "" -"The process of putting a file into a virtual machine image before the " -"instance is started." -msgstr "" -"インスタンスが起動する前に、仮想マシンイメージ中にファイルを配置する処理。" - -msgid "" -"The process of removing the association between a floating IP address and a " -"fixed IP address. Once this association is removed, the floating IP returns " -"to the address pool." -msgstr "" -"Floating IP アドレスと Fixed IP アドレスの関連付けを解除する処理。この関連付" -"けが解除されると、Floating IP はアドレスプールに戻されます。" - -msgid "" -"The process of removing the association between a floating IP address and " -"fixed IP and thus returning the floating IP address to the address pool." -msgstr "" -"Floating IP アドレスと Fixed IP の関連付けを削除する処理。これにより、" -"Floating IP アドレスをアドレスプールに返す。" - -msgid "" -"The process of spreading client requests between two or more nodes to " -"improve performance and availability." -msgstr "" -"パフォーマンスや可用性を向上するために、2 つ以上のノード間でクライアントリク" -"エストを分散する処理。" - -msgid "" -"The process of taking a floating IP address from the address pool so it can " -"be associated with a fixed IP on a guest VM instance." -msgstr "" -"アドレスプールから Floating IP アドレスを取得するプロセス。ゲスト仮想マシンイ" -"ンスタンスに Fixed IP を関連付けられるようにする。" - -msgid "" -"The process that confirms that the user, process, or client is really who " -"they say they are through private key, secret token, password, fingerprint, " -"or similar method." -msgstr "" -"ユーザー、プロセスまたはクライアントが、秘密鍵、秘密トークン、パスワード、指" -"紋または同様の方式により示されている主体と本当に同じであることを確認するプロ" -"セス。" - -msgid "" -"The project name for the Telemetry service, which is an integrated project " -"that provides metering and measuring facilities for OpenStack." -msgstr "" -"Telemetry サービスのプロジェクト名。OpenStack 向けにメータリングと計測機能を" -"提供する、統合プロジェクト。" - -msgid "The project that provides OpenStack Identity services." -msgstr "OpenStack Identity サービスを提供するプロジェクト。" - -msgid "" -"The protocol by which layer-3 IP addresses are resolved into layer-2 link " -"local addresses." -msgstr "L3 IP プロトコルが L2 リンクローカルアドレスに解決されるプロトコル。" - -msgid "The queue" -msgstr "キュー" - -msgid "" -"The recommended way to install setuptools on Microsoft Windows is to follow " -"the documentation provided on the setuptools website (https://pypi.python." -"org/pypi/setuptools). Another option is to use the unofficial binary " -"installer maintained by Christoph Gohlke (`http://www.lfd.uci.edu/~gohlke/" -"pythonlibs/ #setuptools `__)." 
-msgstr "" -"Microsoft Windows に setuptools をインストールする推奨方法は、の Web サイト " -"(https://pypi.python.org/pypi/setuptools) にあるドキュメントに従うことです。" -"もう 1 つの選択肢は、Christoph Gohlke さんによりメンテナンスされている非公式" -"バイナリーインストーラー (`http://www.lfd.uci.edu/~gohlke/pythonlibs/" -"#setuptools `__) を使" -"用することです。" - -msgid "" -"The registry is a private internal service meant for use by OpenStack Image " -"service. Do not expose this service to users." -msgstr "" -"レジストリーは OpenStack Image service 自身が使用するプライベートな内部サービ" -"スです。ユーザーに公開しないでください。" - -msgid "" -"The router advertisement daemon, used by the Compute VLAN manager and " -"FlatDHCP manager to provide routing services for VM instances." -msgstr "" -"ルーター通知デーモン。仮想マシンインスタンスにルーティングサービスを提供する" -"ために、Compute の VLAN マネージャーと FlatDHCP マネージャーにより使用され" -"る。" - -msgid "" -"The row that has the value ``used_max`` in the ``PROJECT`` column shows the " -"sum of the resources allocated to the instances that run on the host." -msgstr "" -"``PROJECT`` 列にある ``used_max`` という値の行は、ホストで実行中のインスタン" -"スに割り当てられたリソースの合計を表示します。" - -msgid "" -"The row that has the value ``used_now`` in the ``PROJECT`` column shows the " -"sum of the resources allocated to the instances that run on the host, plus " -"the resources allocated to the virtual machine of the host itself." -msgstr "" -"``PROJECT`` 列にある ``used_now`` という値の行は、ホストで実行中のインスタン" -"スに割り当てられたリソースの合計と、ホスト自体の仮想マシンに割り当てられたリ" -"ソースを表示します。" - -msgid "" -"The service enables deployers to integrate with the Orchestration service " -"directly or through custom plug-ins." -msgstr "" -"オペレーターは 直接、あるいはカスタムプラグイン経由で様々なものを " -"Orchestration サービスと統合できます。" - -msgid "" -"The software package used to provide AMQP messaging capabilities within " -"Compute. Default package is RabbitMQ." -msgstr "" -"Compute 内で AMQP メッセージング機能を提供するために使用されるソフトウェア" -"パッケージ。標準のパッケージは RabbitMQ。" - -msgid "The solution addresses the following use cases:" -msgstr "このソリューションで、以下のユースケースが解決できます。" - -msgid "" -"The source used by Identity service to retrieve user information; an " -"OpenLDAP server, for example." -msgstr "" -"ユーザー情報を取得するために、Identity により使用されるソース。例えば、" -"OpenLDAP。" - -msgid "" -"The step in the Compute scheduling process when hosts that cannot run VMs " -"are eliminated and not chosen." -msgstr "" -"VM を実行できないホストを排除し、選択されないようにする Compute のスケジュー" -"リング処理の段階。" - -msgid "" -"The storage method used by the Identity service catalog service to store and " -"retrieve information about API endpoints that are available to the client. " -"Examples include an SQL database, LDAP database, or KVS back end." -msgstr "" -"クライアントが利用可能な API エンドポイントに関する情報を保存、取得するのに、" -"Identity サービスのカタログサービスが使用する保存方式。SQL データベース、" -"LDAP データベース、KVS バックエンドなどがある。" - -msgid "" -"The sum of each cost used when deciding where to start a new VM instance in " -"Compute." -msgstr "" -"Compute で新しい仮想マシンを起動する場所を判断するときに使用される各コストの" -"合計。" - -msgid "The tenant who owns an Image service virtual machine image." -msgstr "Image service の仮想マシンイメージを所有するテナント。" - -msgid "The tenant who should own the image. The size of image data, in bytes." -msgstr "イメージを所有するプロジェクト。イメージのバイト単位の容量。" - -msgid "" -"The transfer of data, usually in the form of files, from one computer to " -"another." -msgstr "" -"あるコンピューターから他のコンピューターへのデータの転送。通常はファイルの形" -"式。" - -msgid "" -"The underlying format that a disk image for a VM is stored as within the " -"Image service back-end store. For example, AMI, ISO, QCOW2, VMDK, and so on." 
-msgstr "" -"仮想マシンのディスクイメージが Image service のバックエンドストア内で保存され" -"る、バックエンドの形式。AMI、ISO、QCOW2、VMDK などがある。" - -msgid "" -"The universal measurement of how quickly data is transferred from place to " -"place." -msgstr "" -"データがある場所から別の場所にどのくらい速く転送されるかの普遍的な計測基準。" - -msgid "" -"The valid model values depend on the ``libvirt_type`` setting, as shown in " -"the following tables." -msgstr "" -"有効なモデルの値は、以下の表にあるように ``libvirt_type`` 設定により左右され" -"ます。" - -msgid "" -"The volume must be in an ``available`` state or the request will be denied. " -"If the transfer request is valid in the database (that is, it has not " -"expired or been deleted), the volume is placed in an ``awaiting transfer`` " -"state. For example:" -msgstr "" -"ボリュームが ``利用可能`` の状態でない場合、要求は却下されます。転送要求が" -"データベース内で有効な場合 (失効していない、または削除されていない場合)、ボ" -"リュームは ``awaiting transfer`` の状態になります。たとえば、 " - -msgid "" -"The web-based management interface for OpenStack. An alternative name for " -"horizon." -msgstr "OpenStack 用 Web ベース管理インターフェース。Horizon の別名。" - -msgid "Then install pip and use it to manage client installation:" -msgstr "pip をインストールして、インストールするクライアントを管理します。" - -msgid "Then you can install the packages:" -msgstr "そして、パッケージをインストールします。" - -msgid "" -"Theoretically, OpenStack Compute can support any database that SQL-Alchemy " -"supports. Common databases are SQLite3 for test and development work, MySQL, " -"and PostgreSQL." -msgstr "" -"理論的には、OpenStack Compute は SQL-Alchemy がサポートするデータベースをすべ" -"てサポートします。一般的に使用されているデータベースは、テスト・開発用には " -"SQLite3、それ以外では MySQL や PostgreSQL です。" - -msgid "" -"There are also packaged versions of the clients available in `RDO `__ that enable yum to install the clients as described " -"in Installing_from_packages_." -msgstr "" -"Installing_from_packages_ に記載されているように、yum を使用してインストール" -"できるクライアントパッケージも `RDO `__ にありま" -"す。" - -msgid "" -"There are also packaged versions of the clients available that enable zypper " -"to install the clients as described in Installing_from_packages_." -msgstr "" -"Installing_from_packages_ に記載されているように、zypper を使用してインストー" -"ルできるクライアントパッケージもあります。" - -msgid "" -"These services communicate by using the OpenStack messaging bus. Only the " -"collector and API server have access to the data store." -msgstr "" -"これらのサービスは OpenStack のメッセージバスを使って通信します。コレクター" -"と API サーバーだけがデータストアにアクセスできます。" - -msgid "" -"These values are computed by using information about the flavors of the " -"instances that run on the hosts. This command does not query the CPU usage, " -"memory usage, or hard disk usage of the physical host." -msgstr "" -"これらの値は、ホストで実行されるインスタンスのフレーバーに関する情報を使用し" -"て計算されます。このコマンドは、物理ホストの CPU の使用状況、メモリーの使用状" -"況、ハードディスクの使用状況の問い合わせは行いません。" - -msgid "This example creates a ``my-new-volume`` volume based on an image." -msgstr "" -"以下の例では、イメージをベースにして ``my-new-volume`` ボリュームを作成しま" -"す。" - -msgid "This example is a high-level process flow for using Database services:" -msgstr "" -"この例は、Database サービスを使用するための高レベルなプロセスフローです。" - -msgid "" -"This glossary offers a list of terms and definitions to define a vocabulary " -"for OpenStack-related concepts." -msgstr "" -"この用語集は、OpenStack 関連の概念の語彙を定義するための用語や定義の一覧を提" -"供します。" - -msgid "This guide focuses on the ``local_settings.py`` file." -msgstr "このガイドは ``local_settings.py`` ファイルでの設定を扱います。" - -msgid "This section describes OpenStack services in detail." -msgstr "このセクションでは OpenStack のサービスを詳しく説明します。" - -msgid "" -"To avoid storing the password in plain text, you can prompt for the " -"OpenStack password interactively." 
-msgstr "" -"OpenStack のパスワードを対話的に入力して、パスワードを平文で保存することを避" -"けることもできます。" - -msgid "To create an image, use :command:`glance image-create`:" -msgstr ":command:`glance image-create` を使用して、イメージを作成します。" - -msgid "" -"To delete your volume, you must first detach it from the server. To detach " -"the volume from your server and check for the list of existing volumes, see " -"steps 1 and 2 in Resize_a_volume_." -msgstr "" -" ボリュームを削除するには、まずサーバーからボリュームを切り離す必要がありま" -"す。サーバーからボリュームを切り離して既存のボリューム一覧を確認するには、" -"Resize_a_volume_ のステップ 1 と 2 を参照します。" - -msgid "" -"To design, deploy, and configure OpenStack, administrators must understand " -"the logical architecture." -msgstr "" -"クラウド管理者は、OpenStack を設計、導入、設定するために、論理アーキテク" -"チャーを理解する必要があります。" - -msgid "" -"To get a list of images and to get further details about a single image, " -"use :command:`glance image-list` and :command:`glance image-show` commands." -msgstr "" -"イメージの一覧を取得して単一のイメージの詳細を確認するには、 :command:" -"`glance image-list` と :command:`glance image-show` コマンドを使用します。" - -msgid "" -"To install the clients on a Linux, Mac OS X, or Microsoft Windows system, " -"use pip. It is easy to use, ensures that you get the latest version of the " -"clients from the `Python Package Index `__, and " -"lets you update or remove the packages later on." -msgstr "" -"Linux、Mac OS X、Microsoft Windows システムにクライアントをインストールする場" -"合、pip を使用します。これは使いやすく、 `Python Package Index `__ からきちんと最新版のクライアントを取得します。また、後から更" -"新や削除することもできます。" - -msgid "" -"To provide feedback on documentation, join and use the openstack-docs@lists." -"openstack.org mailing list at `OpenStack Documentation Mailing List `__, or `report " -"a bug `__." -msgstr "" -"ドキュメントにフィードバックを行う場合は、`OpenStack Documentation メーリング" -"リスト `__ にある openstack-docs@lists.openstack.org メーリングリストに参加して" -"ください。または、`バグ報告 `__ をしてください。" - -msgid "To remove the client, run the :command:`pip uninstall` command:" -msgstr "" -"クライアントを削除する場合、 :command:`pip uninstall` コマンドを実行します。" - -msgid "" -"To resize your volume, you must first detach it from the server. To detach " -"the volume from your server, pass the server ID and volume ID to the " -"following command:" -msgstr "" -"ボリュームをリサイズするには、まずボリュームをサーバーから切り離します。以下" -"のコマンドでサーバー ID とボリューム ID を指定して、サーバーからボリュームを" -"切り離します。" - -msgid "" -"To set the required environment variables for the OpenStack command-line " -"clients, you must create an environment file called an OpenStack rc file, or " -"``openrc.sh`` file. If your OpenStack installation provides it, you can " -"download the file from the OpenStack dashboard as an administrative user or " -"any other user. This project-specific environment file contains the " -"credentials that all OpenStack services use." 
-msgstr "" -"OpenStack コマンドラインクライアントに必要な環境変数を設定するには、" -"OpenStack rc ファイルと呼ばれる環境ファイルまたは ``openrc.sh`` ファイルを作" -"成する必要があります。OpenStack のインストールでこのファイルが提供される場合" -"は、管理ユーザーまたはその他のユーザーで OpenStack Dashboard からダウンロード" -"してください。このプロジェクト固有の環境ファイルには、OpenStack サービスが使" -"用する認証情報が含まれます。 " - -msgid "" -"To store location metadata for images, which enables direct file access for " -"a client, update the ``/etc/glance/glance-api.conf`` file with the following " -"statements:" -msgstr "" -"イメージの場所のメタデータを保存してクライアントが直接ファイルにアクセスでき" -"るようにするには、``/etc/glance/glance-api.conf`` ファイルで以下のステートメ" -"ントを追加更新します。 " - -msgid "To update an image by name or ID, use :command:`glance image-update`:" -msgstr "" -":command:`glance image-update` を使用して、名前または ID によりイメージを更新" -"します。" - -msgid "" -"To upgrade a client, add the :option:`--upgrade` option to the :command:`pip " -"install` command:" -msgstr "" -":command:`pip install` コマンドを :option:`--upgrade` オプションを追加して、" -"クライアントをアップグレードします。" - -msgid "" -"To verify that your volume was created successfully, list the available " -"volumes:" -msgstr "" -"ボリュームが正常に作成されたことを確認するには、利用可能なボリュームを一覧表" -"示します。" - -msgid "" -"To view your changes reload your dashboard. If necessary go back and modify " -"your CSS file as appropriate." -msgstr "" -"ダッシュボードを再読み込みして、変更を表示します。必要があれば、上に戻って " -"CSS ファイルを適切に変更してください。" - -msgid "" -"Tool used for maintaining Address Resolution Protocol packet filter rules in " -"the Linux kernel firewall modules. Used along with iptables, ebtables, and " -"ip6tables in Compute to provide firewall services for VMs." -msgstr "" -"Linux カーネルファイアウォールモジュールで ARP パケットフィルタールールを維持" -"するために使用されるツール。仮想マシン向けのファイアウォールサービスを提供す" -"るために、Compute で iptables、ebtables、ip6tables と一緒に使用される。" - -msgid "" -"Tool used in OpenStack development to ensure correctly ordered testing of " -"changes in parallel." -msgstr "" -"OpenStack 開発で使用されているツールで、変更のテストを正しい順番を保証しなが" -"ら並列に実行する。" - -msgid "Tool used to run jobs automatically for OpenStack development." -msgstr "OpenStack 開発のためにジョブを自動的に実行するために使用されるツール。" - -msgid "" -"Tool used to set up, maintain, and inspect the tables of IPv6 packet filter " -"rules in the Linux kernel. In OpenStack Compute, ip6tables is used along " -"with arptables, ebtables, and iptables to create firewalls for both nodes " -"and VMs." -msgstr "" -"Linux カーネルで IPv6 パケットフィルタールールのテーブルをセットアップ、維" -"持、検査するために使用されるツール。OpenStack Compute では、ノードと仮想マシ" -"ンの両方に対するファイアウォールを作成するために、ip6tables が arptables、" -"ebtables、iptables と一緒に使用される。" - -msgid "Torpedo" -msgstr "Torpedo" - -msgid "Transfer a volume" -msgstr "ボリュームの譲渡" - -msgid "TripleO" -msgstr "TripleO" - -msgid "Troubleshoot image creation" -msgstr "イメージ作成のトラブルシューティング" - -msgid "" -"Type of Compute scheduler that evenly distributes instances among available " -"hosts." -msgstr "" -"利用可能なホスト間でインスタンスを平等に分散させる、Compute のスケジューラー" -"の一種。" - -msgid "UUID for each Compute or Image service VM flavor or instance type." -msgstr "" -"Compute や Image service の仮想マシンの各フレーバーやインスタンスタイプの " -"UUID。" - -msgid "UUID used by Image service to uniquely identify each VM image." -msgstr "" -"各仮想マシンイメージを一意に識別するために Image service により使用される " -"UUID。" - -msgid "Ubuntu" -msgstr "Ubuntu" - -msgid "" -"Under the Compute distributed scheduler, this is calculated by looking at " -"the capabilities of each host relative to the flavor of the VM instance " -"being requested." -msgstr "" -"Compute の分散スケジューラーにおいて、要求している仮想マシンインスタンスのフ" -"レーバーに関連する、各ホストのキャパシティーにより計算される。" - -msgid "" -"Unique ID applied to each storage volume under the Block Storage control." 
-msgstr "" -"Block Storage の管理下にある各ストレージボリュームに適用される一意な ID。" - -msgid "Unique ID assigned to each Networking VIF." -msgstr "各 Networking VIF に割り当てられる一意な ID。" - -msgid "" -"Unique ID assigned to each Object Storage request; used for debugging and " -"tracing." -msgstr "" -"各 Object Storage リクエストに割り当てられる一意な ID。デバッグや追跡に使用さ" -"れる。" - -msgid "Unique ID assigned to each guest VM instance." -msgstr "各ゲスト仮想マシンインスタンスに割り当てられる一意な ID。" - -msgid "" -"Unique ID assigned to each network segment within Networking. Same as " -"network UUID." -msgstr "" -"Networking 内の各ネットワークセグメントに割り当てられる一意な ID。ネットワー" -"ク UUID と同じ。" - -msgid "Unique ID assigned to each request sent to Compute." -msgstr "Compute に送られる各リクエストに割り振られる一意な ID。" - -msgid "" -"Unique ID assigned to each service that is available in the Identity service " -"catalog." -msgstr "" -"Identity のサービスカタログで利用可能な各サービスに割り当てられる一意な ID。" - -msgid "" -"Unique ID assigned to each tenant within the Identity service. The project " -"IDs map to the tenant IDs." -msgstr "" -"Identity 内で各テナントに割り当てられる一意な ID。プロジェクト ID は、テナン" -"ト ID に対応付けられる。" - -msgid "Unique ID for a Networking VIF or vNIC in the form of a UUID." -msgstr "Networking 仮想インターフェースや vNIC 用の一意な UUID 形式の ID。" - -msgid "Unique ID for a Networking network segment." -msgstr "Networking ネットワークセグメントの一意な ID。" - -msgid "Unique ID for a Networking port." -msgstr "Networking ポートのユニーク ID。" - -msgid "" -"Unique numeric ID associated with each user in Identity, conceptually " -"similar to a Linux or LDAP UID." -msgstr "" -"Identity で各ユーザーと関連付けられた一意な数値 ID。概念として、Linux や " -"LDAP の UID を同じ。" - -msgid "Uniquely ID for an Object Storage object." -msgstr "Object Storage オブジェクト用の一意な ID。" - -msgid "Upgrade or remove clients" -msgstr "クライアントの更新と削除" - -msgid "" -"Upload your new images to ``/usr/share/openstack-dashboard/" -"openstack_dashboard/static/dashboard/img/``." -msgstr "" -"新しいイメージを ``/usr/share/openstack-dashboard/openstack_dashboard/static/" -"dashboard/img/`` にアップロードします。" - -msgid "" -"Use OpenStack Compute to host and manage cloud computing systems. OpenStack " -"Compute is a major part of an Infrastructure-as-a-Service (:term:`IaaS`) " -"system. The main modules are implemented in Python." -msgstr "" -"OpenStack Compute を使用して、クラウドコンピューティングシステムを動かし管理" -"を行います。OpenStack Compute は、Infrastructure-as-a-Service (:term:`IaaS`) " -"システムの主要な要素です。主要なモジュールは Python で実装されています。" - -msgid "Use cases include:" -msgstr "以下のユースケースがあります。" - -msgid "" -"Use pip to install the OpenStack clients on a Linux, Mac OS X, or Microsoft " -"Windows system. It is easy to use and ensures that you get the latest " -"version of the client from the `Python Package Index `__. Also, pip enables you to update or remove a package." -msgstr "" -"Linux、Mac OS X、Microsoft Windows システムに OpenStack クライアントをインス" -"トールする場合、pip を使用します。これは使いやすく、きちんと最新版のクライア" -"ントを `Python Package Index `__ から取得しま" -"す。また、パッケージを更新したり削除したりできます。" - -msgid "" -"Use the :command:`trove list` command to get the ID of the instance, " -"followed by the :command:`trove show` command to get the IP address of it." -msgstr "" -":command:`trove list` コマンドを使用して、インスタンスの ID を取得します。続" -"けて、:command:`trove show` コマンドを使用して、その IP アドレスを取得しま" -"す。" - -msgid "" -"Use the :command:`trove-manage` command to import images and offer them to " -"tenants." -msgstr "" -":command:`trove-manage` コマンドを使用して、イメージをインポートして、それら" -"をテナントに提供します。" - -msgid "" -"Used along with an EC2 access key when communicating with the Compute EC2 " -"API; used to digitally sign each request." 
-msgstr "" -"Compute EC2 API 利用時に EC2 アクセスキーと一緒に使用される。各リクエストを電" -"子署名するために使用される。" - -msgid "Used along with an EC2 secret key to access the Compute EC2 API." -msgstr "Compute EC2 API にアクセスするために、EC2 秘密鍵と一緒に使用される。" - -msgid "Used along with an EKI to create an EMI." -msgstr "EMI を作成するために、EKI と一緒に使用する。" - -msgid "Used along with an ERI to create an EMI." -msgstr "EMI を作成するために、ERI と一緒に使用する。" - -msgid "" -"Used along with arptables and ebtables, iptables create firewalls in " -"Compute. iptables are the tables provided by the Linux kernel firewall " -"(implemented as different Netfilter modules) and the chains and rules it " -"stores. Different kernel modules and programs are currently used for " -"different protocols: iptables applies to IPv4, ip6tables to IPv6, arptables " -"to ARP, and ebtables to Ethernet frames. Requires root privilege to " -"manipulate." -msgstr "" -"Compute においてファイアウォールを作成する、arptables、ebtables、iptables と" -"一緒に使用される。iptables は、Linux カーネルファイアウォール (別の " -"Netfilter モジュール) により提供されるテーブル、それを保存するチェインやルー" -"ル。複数のカーネルモジュールとプログラムが、別々のプロトコルに対して使用され" -"る。iptables は IPv4、ip6tables は IPv6、arptables は ARP、ebtables は " -"Ethernet フレームに適用される。操作すうために root 権限が必要になる。" - -msgid "" -"Used by Image service to obtain images on the local host rather than re-" -"downloading them from the image server each time one is requested." -msgstr "" -"イメージが要求されたときに、イメージサーバーから再ダウンロードするのではな" -"く、ローカルホストにあるイメージを取得するために、Image service により使用さ" -"れる。" - -msgid "" -"Used by Object Storage devices to determine which storage devices are " -"suitable for the job. Devices are weighted by size." -msgstr "" -"どのストレージデバイスがジョブに対して適切であるかを判断するために、Object " -"Storage デバイスにより使用される。デバイスは容量により重み付けされる。" - -msgid "" -"Used by Object Storage to determine the location of an object in the ring. " -"Maps objects to partitions." -msgstr "" -"リング内でオブジェクトの場所を判断するために、Object Storage により使用され" -"る。オブジェクトをパーティションに対応付ける。" - -msgid "" -"Used by Object Storage to determine which partition data should reside on." -msgstr "" -"パーティションデータが配置されるべき場所を決めるために、Object Storage により" -"使用される。" - -msgid "Used by Object Storage to push object replicas." -msgstr "" -"オブジェクトの複製をプッシュするために Object Storage により使用される。" - -msgid "" -"Used by most OpenStack Networking installations to route information between " -"the neutron-server and various agents. Also acts as a database to store " -"networking state for particular plug-ins." -msgstr "" -"ほとんどの OpenStack Networking のインストール環境において、neutron-server と" -"各種エージェント間での情報の転送に使用されます。プラグインによってはネット" -"ワーク状態を保存するのにも使用されます。" - -msgid "Used for adding additional persistent storage to a virtual machine (VM)" -msgstr "永続的なストレージを仮想マシン(VM)へ追加するために使用される" - -msgid "Used for providing file shares to a virtual machine" -msgstr "ファイル共有を仮想マシンに提供するために使用される" - -msgid "Used for storing virtual machine images and data" -msgstr "仮想マシンイメージとデータを保存するために使用される" - -msgid "" -"Used to mark Object Storage objects that have been deleted; ensures that the " -"object is not updated on another node after it has been deleted." -msgstr "" -"Object Storage のオブジェクトが削除済みであることを示す印をつけるために使用さ" -"れる。オブジェクトの削除後、他のノードにおいて更新されないことを保証する。" - -msgid "" -"Used to restrict communications between hosts and/or nodes, implemented in " -"Compute using iptables, arptables, ip6tables, and ebtables." -msgstr "" -"ホストノード間の通信を制限する為に使用される。iptables, arptables, " -"ip6tables, ebtables を使用して Compute により実装される。" - -msgid "Used to track segments of a large object within Object Storage." 
-msgstr "Object Storage 内で大きなオブジェクトを管理するために使用される。" - -msgid "User Mode Linux (UML)" -msgstr "User Mode Linux (UML)" - -msgid "User can specify `volume type` when creating a volume." -msgstr "ユーザーは、ボリューム作成時に `volume type` を指定できます。" - -msgid "User-defined alphanumeric string in Compute; the name of a project." -msgstr "Compute でユーザーが定義した英数文字列。プロジェクトの名前。" - -msgid "User-friendly UI for ad-hoc analytics queries based on Hive or Pig." -msgstr "" -"Hive や Pig をベースにした、アドホックな分析クエリー向けのユーザーフレンド" -"リーな UI。" - -msgid "" -"Users of Object Storage interact with the service through the proxy server, " -"which in turn looks up the location of the requested data within the ring " -"and returns the results to the user." -msgstr "" -"Object Storage のユーザーは、リング中にあるリクエストされたデータの場所を参照" -"してユーザに結果を返すプロキシサーバーを介して、このサービスに通信する。" - -msgid "" -"Utilization of unused compute power from general purpose OpenStack IaaS " -"cloud." -msgstr "" -"汎用的な OpenStack IaaS クラウドの使用されていないコンピュートリソースの活用" - -msgid "VIF UUID" -msgstr "VIF UUID" - -msgid "VIP" -msgstr "仮想 IP" - -msgid "VLAN manager" -msgstr "VLAN マネージャー" - -msgid "VLAN network" -msgstr "VLAN ネットワーク" - -msgid "VM Remote Control (VMRC)" -msgstr "VM Remote Control (VMRC)" - -msgid "VM disk (VMDK)" -msgstr "VM disk (VMDK)" - -msgid "VM image" -msgstr "仮想マシンイメージ" - -msgid "VM image container format supported by Image service." -msgstr "Image service によりサポートされる仮想マシンイメージのコンテナー形式。" - -msgid "VMware API" -msgstr "VMware API" - -msgid "VMware NSX Neutron plug-in" -msgstr "VMware NSX Neutron プラグイン" - -msgid "VMwareAPI for VMware" -msgstr "VMware 向けの VMwareAPI" - -msgid "VNC proxy" -msgstr "VNC プロキシ" - -msgid "VXLAN" -msgstr "VXLAN" - -msgid "Various periodic processes" -msgstr "さまざまな定期タスク" - -msgid "" -"Various repository types are supported including normal file systems, Object " -"Storage, RADOS block devices, HTTP, and Amazon S3. Note that some " -"repositories will only support read-only usage." -msgstr "" -"さまざまな種類のリポジトリーがサポートされており、通常のファイルシステム、" -"Object Storage、RADOS ブロックデバイス、HTTP、Amazon S3 などがあります。いく" -"つかのリポジトリーでは、読み込み専用の利用だけがサポートされている点に注意し" -"てください。" - -msgid "" -"Verify that transfer list is now empty and that the volume is again " -"available for transfer:" -msgstr "転送一覧が空になり、ボリュームが転送に使用できることを確認します。" - -msgid "View pending transfers:" -msgstr " 待機中の転送を確認します。" - -msgid "Virtual Central Processing Unit (vCPU)" -msgstr "仮想CPU (vCPU)" - -msgid "Virtual Disk Image (VDI)" -msgstr "Virtual Disk Image (VDI)" - -msgid "Virtual Hard Disk (VHD)" -msgstr "Virtual Hard Disk (VHD)" - -msgid "Virtual Network Computing (VNC)" -msgstr "Virtual Network Computing (VNC)" - -msgid "Virtual Network InterFace (VIF)" -msgstr "仮想ネットワークインタフェース (VIF)" - -msgid "" -"Virtual network type that uses neither VLANs nor tunnels to segregate tenant " -"traffic. Each flat network typically requires a separate underlying physical " -"interface defined by bridge mappings. However, a flat network can contain " -"multiple subnets." -msgstr "" -"テナントの通信を分離するために、VLAN もトンネルも使用しない仮想ネットワーク方" -"式。各フラットネットワークは、一般的にブリッジマッピングにより定義された、" -"バックエンドに専用の物理インターフェースを必要とする。しかしながら、フラット" -"ネットワークは複数のサブネットを含められる。" - -msgid "VirtualBox" -msgstr "VirtualBox" - -msgid "VirtualE1000" -msgstr "VirtualE1000" - -msgid "VirtualPCNet32" -msgstr "VirtualPCNet32" - -msgid "VirtualVmxnet" -msgstr "VirtualVmxnet" - -msgid "" -"Virtualization API library used by OpenStack to interact with many of its " -"supported hypervisors." 
-msgstr "" -"多くのサポートハイパーバイザーと通信するために、OpenStack により使用される仮" -"想化 API ライブラリー。" - -msgid "Volume API" -msgstr "Volume API" - -msgid "" -"Volume that does not save the changes made to it and reverts to its original " -"state when the current user relinquishes control." -msgstr "" -"変更が保存されないボリューム。現在のユーザーが制御を解放したとき、元の状態に" -"戻される。" - -msgid "WSGI middleware" -msgstr "WSGI ミドルウェア" - -msgid "" -"WSGI middleware component of Object Storage that serves container data as a " -"static web page." -msgstr "" -"コンテナーデータを静的 Web ページとして取り扱う Object Storage の WSGI ミドル" -"ウェアコンポーネント。" - -msgid "What's next" -msgstr "次の手順" - -msgid "" -"When installing OpenStack Identity service, you must register each service " -"in your OpenStack installation. Identity service can then track which " -"OpenStack services are installed, and where they are located on the network." -msgstr "" -"OpenStack Identity をインストールする際に、OpenStack の各サービスを登録する必" -"要があります。これにより、Identity サービスは、OpenStack のサービスがインス" -"トールされていること、それらがネットワーク上のどこにあるかを把握できます。" - -msgid "" -"When the volume is fully deleted, it disappears from the list of volumes:" -msgstr "" -"ボリュームが完全に削除されると、ボリュームの一覧には表示されなくなります。" - -msgid "" -"When viewing a list of images, you can also use ``grep`` to filter the list, " -"as follows:" -msgstr "" -"以下のように、イメージ一覧の確認の際に、``grep`` を使用して一覧をフィルタリン" -"グすることができます。 " - -msgid "" -"When you are prompted for an OpenStack password, enter the password for the " -"user who downloaded the ``PROJECT-openrc.sh`` file." -msgstr "" -"OpenStack パスワードの入力プロンプトが表示されたとき、``PROJECT-openrc.sh`` " -"ファイルをダウンロードしたユーザーのパスワードを入力します。" - -msgid "" -"When you run OpenStack client commands, you can override some environment " -"variable settings by using the options that are listed at the end of the " -"``help`` output of the various client commands. For example, you can " -"override the ``OS_PASSWORD`` setting in the ``PROJECT-openrc.sh`` file by " -"specifying a password on a :command:`openstack` command, as follows:" -msgstr "" -"OpenStack クライアントコマンドを実行するとき、さまざまなクライアントコマンド" -"の ``help`` 出力の最後に一覧表示されるオプションを使用することにより、いくつ" -"かの環境変数を上書きできます。たとえば以下のように、:command:`openstack` コ" -"マンドにパスワードを指定することにより、``PROJECT-openrc.sh`` ファイルで設定" -"した ``OS_PASSWORD`` 設定を上書きできます。" - -msgid "" -"When you source the file, environment variables are set for your current " -"shell. The variables enable the OpenStack client commands to communicate " -"with the OpenStack services that run in the cloud." -msgstr "" -"このファイルを読み込むと、環境変数が現在のシェルに対して設定されます。この変" -"数により OpenStack クライアントコマンドがクラウドで実行中の OpenStack サービ" -"スとやりとりできるようになります。" - -msgid "" -"When you use OpenStack with VMware vCenter Server, you need to specify the " -"``vmware_disktype`` and ``vmware_adaptertype`` properties with :command:" -"`glance image-create`. Also, we recommend that you set the ``hypervisor_type=" -"\"vmware\"`` property. For more information, see `Images with VMware vSphere " -"`_ in the OpenStack Configuration Reference." -msgstr "" -"OpenStack を VMware vCenter Server と一緒に使用している場合、:command:" -"`glance image-create` を用いて ``vmware_disktype`` と ``vmware_adaptertype`` " -"プロパティーを指定する必要があります。また、``hypervisor_type=\"vmware\"`` プ" -"ロパティーを設定することを推奨します。詳細は *OpenStack Configuration " -"Reference* の `Images with VMware vSphere `_ を参照してください。" - -msgid "Where ``PASSWORD`` is your password." 
-msgstr "ここで ``PASSWORD`` は、お使いのパスワードです。" - -msgid "While logged in as the volume donor, list the available volumes:" -msgstr "" -"ボリュームの譲渡元としてログインし、利用可能なボリュームを一覧表示します。" - -msgid "" -"While the ``auth_key`` property is visible in the output of ``cinder " -"transfer-create VOLUME_ID``, it will not be available in subsequent ``cinder " -"transfer-show TRANSFER_ID`` commands." -msgstr "" -"``auth_key`` プロパティーが ``cinder transfer-create VOLUME_ID`` の出力に含ま" -"れますが、後続の ``cinder transfer-show TRANSFER_ID`` コマンドではこれを利用" -"できません。" - -msgid "" -"While you can install the ``keystone`` client for interacting with version " -"2.0 of the service's API, you should use the ``openstack`` client for all " -"Identity interactions. Identity API v2 is deprecated in the Mitaka release." -msgstr "" -"Identity API バージョン 2.0 を利用するために ``keystone`` クライアントをイン" -"ストールできますが、すべての Identity 処理に ``openstack`` クライアントを使用" -"すべきです。Identity API v2 は、Mitaka リリースで非推奨になっています。" - -msgid "" -"Within RabbitMQ and Compute, it is the messaging interface that is used by " -"the scheduler service to receive capability messages from the compute, " -"volume, and network nodes." -msgstr "" -"RabbitMQ と Compute の中で、コンピュートノード、ボリュームノード、ネットワー" -"クノードからのメッセージを受け付ける機能のために、スケジューラーサービスによ" -"り使用されるメッセージングインターフェース。" - -msgid "Work in progress - expected for the Mitaka release" -msgstr "Mitaka リリースに向けて開発中" - -msgid "Workflow service" -msgstr "Workflow サービス" - -msgid "Workflow service for OpenStack cloud." -msgstr "OpenStack クラウド向け Workflow サービス。" - -msgid "XFS" -msgstr "XFS" - -msgid "Xen" -msgstr "Xen" - -msgid "Xen API" -msgstr "Xen API" - -msgid "Xen Cloud Platform (XCP)" -msgstr "Xen Cloud Platform (XCP)" - -msgid "Xen Storage Manager Volume Driver" -msgstr "Xen Storage Manager Volume Driver" - -msgid "" -"Xen is a hypervisor using a microkernel design, providing services that " -"allow multiple computer operating systems to execute on the same computer " -"hardware concurrently." -msgstr "" -"Xen は、マイクロカーネル設計を使用したハイパーバイザー。複数のコンピューター" -"オペレーティングシステムを同じコンピューターハードウェアで同時に実行できるよ" -"うになるサービスを提供する。" - -msgid "XenAPI for XenServer/XCP" -msgstr "XenServer/XCP 向けの XenAPI" - -msgid "XenServer" -msgstr "XenServer" - -msgid "" -"You are not prompted for the password with this method. The password lives " -"in clear text format in the ``PROJECT-openrc.sh`` file. Restrict the " -"permissions on this file to avoid security problems. You can also remove the " -"``OS_PASSWORD`` variable from the file, and use the :option:`--password` " -"parameter with OpenStack client commands instead." -msgstr "" -"この方法を用いると、パスワードを聞かれません。パスワードは ``PROJECT-openrc." -"sh`` ファイルに平文で記載されています。セキュリティー問題を避けるために、この" -"ファイルのパーミッションを制限します。このファイルから ``OS_PASSWORD`` 変数を" -"削除し、OpenStack クライアントコマンドで :option:`--password` パラメーターを" -"使用することもできます。" - -msgid "You can install pip and use it to manage client installation:" -msgstr "" -"pip をインストールして、インストールするクライアントを管理することができま" -"す。" - -msgid "" -"You can run the commands from the command line, or include the commands " -"within scripts to automate tasks. If you provide OpenStack credentials, such " -"as your user name and password, you can run these commands on any computer." -msgstr "" -"コマンドライン、または作業を自動化するスクリプトの中からコマンドを実行できま" -"す。ユーザー名とパスワードのような、OpenStack のクレデンシャルを指定すると、" -"どのコンピューターでもこれらのコマンドを実行できます。" - -msgid "" -"You can show basic statistics on resource usage for hosts and instances." 
-msgstr "" -"ホストやインスタンスのリソース使用状況に関する基本的な統計を表示できます。" - -msgid "" -"You can transfer a volume from one owner to another by using the :command:" -"`cinder transfer*` commands. The volume donor, or original owner, creates a " -"transfer request and sends the created transfer ID and authorization key to " -"the volume recipient. The volume recipient, or new owner, accepts the " -"transfer by using the ID and key." -msgstr "" -":command:`cinder transfer*` コマンドを使用して、別の所有者に転送することがで" -"きます。ボリュームドナーまたは元の所有者が転送要求を作成し、作成した転送 ID " -"と認証キーをボリュームの転送先に送信します。ボリュームの転送先または新規所有" -"者が ID とキーを使用して転送を確定します。 " - -msgid "" -"You can upload images through the ``glance`` client or the Image service " -"API. You can use the ``nova`` client for the image management. The latter " -"provides mechanisms to list and delete images, set and delete image " -"metadata, and create images of a running instance or snapshot and backup " -"types." -msgstr "" -"``glance`` クライアントまたは Image service API 経由でイメージをアップロード" -"できます。イメージ管理のために ``nova`` クライアントを使用できます。後者は、" -"イメージの一覧表示や削除、イメージのメタデータの設定や削除、実行中のインスタ" -"ンスのスナップショットやバックアップの作成、などの機能を提供します。" - -msgid "" -"You must set the ``OS_CACERT`` environment variable when using the https " -"protocol in the ``OS_AUTH_URL`` environment setting because the verification " -"process for the TLS (HTTPS) server certificate uses the one indicated in the " -"environment. This certificate will be used when verifying the TLS (HTTPS) " -"server certificate." -msgstr "" -"``OS_AUTH_URL`` 環境設定に HTTPS プロトコルを使用する場合、TLS (HTTPS) のサー" -"バー証明書を検証するプロセスが、環境において指定されたものを使用するため、" -"``OS_CACERT`` 環境変数を使用する必要があります。TLS (HTTPS) サーバー証明書を" -"検証するとき、この証明書が使用されます。" - -msgid "ZeroMQ" -msgstr "ZeroMQ" - -msgid "Zuul" -msgstr "Zuul" - -msgid "" -"`API Complete Reference (HTML) `__" -msgstr "" -"`API Complete Reference (HTML) `__" - -msgid "" -"`API Complete Reference (PDF) `__" -msgstr "" -"`API Complete Reference (PDF) `__" - -msgid "`API Guide `__" -msgstr "`API ガイド `__" - -msgid "`Administrator Guide `__" -msgstr "`Administrator Guide `__" - -msgid "`Architecture Design Guide `__" -msgstr "" -"`アーキテクチャー設計ガイド `__" - -msgid "" -"`Block Storage `__" -msgstr "" -"`Block Storage `__" - -msgid "" -"`Bugs: Application catalog (murano) `__" -msgstr "" -"`バグ: Application catalog (murano) `__" - -msgid "" -"`Bugs: Bare metal service (ironic) `__" -msgstr "" -"`バグ: Bare metal サービス (ironic) `__" - -msgid "" -"`Bugs: Clustering service (senlin) `__" -msgstr "" -"`バグ: Clustering サービス (senlin) `__" - -msgid "" -"`Bugs: Containers service (magnum) `__" -msgstr "" -"`バグ: Containers サービス (magnum) `__" - -msgid "" -"`Bugs: DNS service (designate) `__" -msgstr "" -"`バグ: DNS サービス (designate) `__" - -msgid "" -"`Bugs: Data processing service (sahara) `__" -msgstr "" -"`バグ: Data processing サービス (sahara) `__" - -msgid "`Bugs: Database service (trove) `__" -msgstr "`バグ: Database サービス (trove) `__" - -msgid "`Bugs: Deployment service (fuel) `__" -msgstr "`バグ: Deployment サービス (fuel) `__" - -msgid "" -"`Bugs: Key Manager Service (barbican) `__" -msgstr "" -"`バグ: Key Manager サービス (barbican) `__" - -msgid "`Bugs: Messaging service (zaqar) `__" -msgstr "" -"`バグ: Messaging サービス (zaqar) `__" - -msgid "`Bugs: Monitoring (monasca) `__" -msgstr "`バグ: Monitoring (monasca) `__" - -msgid "" -"`Bugs: OpenStack API Documentation (developer.openstack.org) `__" -msgstr "" -"`バグ: OpenStack API Documentation (developer.openstack.org) `__" - -msgid "" -"`Bugs: OpenStack Block Storage (cinder) `__" -msgstr "" -"`バグ: OpenStack Block Storage (cinder) `__" - -msgid "`Bugs: 
OpenStack Compute (nova) `__" -msgstr "`バグ: OpenStack Compute (nova) `__" - -msgid "" -"`Bugs: OpenStack Dashboard (horizon) `__" -msgstr "" -"`バグ: OpenStack Dashboard (horizon) `__" - -msgid "" -"`Bugs: OpenStack Documentation (docs.openstack.org) `__" -msgstr "" -"`バグ: OpenStack Documentation (docs.openstack.org) `__" - -msgid "" -"`Bugs: OpenStack Identity (keystone) `__" -msgstr "" -"`バグ: OpenStack Identity (keystone) `__" - -msgid "" -"`Bugs: OpenStack Image service (glance) `__" -msgstr "" -"`バグ: OpenStack Image サービス (glance) `__" - -msgid "" -"`Bugs: OpenStack Networking (neutron) `__" -msgstr "" -"`バグ: OpenStack Networking (neutron) `__" - -msgid "" -"`Bugs: OpenStack Object Storage (swift) `__" -msgstr "" -"`バグ: OpenStack Object Storage (swift) `__" - -msgid "`Bugs: Orchestration (heat) `__" -msgstr "`バグ: Orchestration (heat) `__" - -msgid "`Bugs: Rating (cloudkitty) `__" -msgstr "`バグ: Rating (cloudkitty) `__" - -msgid "" -"`Bugs: Shared file systems (manila) `__" -msgstr "" -"`バグ: Shared file systems (manila) `__" - -msgid "" -"`Bugs: Telemetry (ceilometer) `__" -msgstr "" -"`バグ: Telemetry (ceilometer) `__" - -msgid "`Bugs: Telemetry v3 (gnocchi) `__" -msgstr "`バグ: Telemetry v3 (gnocchi) `__" - -msgid "" -"`Bugs: Workflow service (mistral) `__" -msgstr "" -"`バグ: Workflow サービス (mistral) `__" - -msgid "`Ceilometer `__" -msgstr "`Ceilometer `__" - -msgid "`Cinder `__" -msgstr "`Cinder `__" - -msgid "" -"`Command-Line Interface Reference `__" -msgstr "" -"`Command-Line Interface Reference `__" - -msgid "" -"`Compute `__" -msgstr "" -"`Compute `__" - -msgid "" -"`Configuration Reference `__" -msgstr "" -"`Configuration Reference `__" - -msgid "" -"`Dashboard `__" -msgstr "" -"`Dashboard `__" - -msgid "" -"`Data processing service `__" -msgstr "" -"`Data processing service `__" - -msgid "" -"`Database service `__" -msgstr "" -"`Database service `__" - -msgid "" -"`Documentation Contributor Guide `__" -msgstr "" -"`Documentation Contributor Guide `__" - -msgid "`End User Guide `__" -msgstr "`エンドユーザーガイド `__" - -msgid "`Glance `__" -msgstr "`Glance `__" - -msgid "`Heat `__" -msgstr "`Heat `__" - -msgid "`High Availability Guide `__" -msgstr "`高可用性ガイド `__" - -msgid "`Horizon `__" -msgstr "`Horizon `__" - -msgid "" -"`Identity service `__" -msgstr "" -"`Identity service `__" - -msgid "" -"`Image service `__" -msgstr "" -"`Image service `__" - -msgid "" -"`Installation Guide for Red Hat Enterprise Linux 7 and CentOS 7 `__" -msgstr "" -"`インストールガイド Red Hat Enterprise Linux 7, CentOS 7 `__" - -msgid "" -"`Installation Guide for Ubuntu 14.04 (LTS) `__" -msgstr "" -"`インストールガイド Ubuntu 14.04 (LTS) 版 `__" - -msgid "" -"`Installation Guide for openSUSE Leap 42.1 and SUSE Linux Enterprise Server " -"12 SP1 `__" -msgstr "" -"`インストールガイド openSUSE Leap 42.1、SUSE Linux Enterprise Server 12 SP1 " -"版 `__" - -msgid "`Keystone `__" -msgstr "`Keystone `__" - -msgid "" -"`Networking `__" -msgstr "" -"`Networking `__" - -msgid "" -"`Networking Guide `__" -msgstr "" -"`ネットワークガイド `__" - -msgid "`Neutron `__" -msgstr "`Neutron `__" - -msgid "`Nova `__" -msgstr "`Nova `__" - -msgid "" -"`Object Storage `__" -msgstr "" -"`Object Storage `__" - -msgid "`Operations Guide `__" -msgstr "`運用ガイド `__" - -msgid "" -"`Orchestration `__" -msgstr "" -"`Orchestration `__" - -msgid "`Sahara `__" -msgstr "`Sahara `__" - -msgid "`Security Guide `__" -msgstr "" -"`セキュリティーガイド `__" - -msgid "`Swift `__" -msgstr "`Swift `__" - -msgid "" -"`Telemetry `__" -msgstr "" -"`Telemetry `__" - -msgid "`Trove `__" -msgstr "`Trove `__" - -msgid 
"" -"`Virtual Machine Image Guide `__" -msgstr "" -"`仮想マシンイメージガイド `__" - -msgid "``--checksum CHECKSUM``" -msgstr "``--checksum CHECKSUM``" - -msgid "``--container-format CONTAINER_FORMAT``" -msgstr "``--container-format CONTAINER_FORMAT``" - -msgid "``--copy-from IMAGE_URL``" -msgstr "``--copy-from IMAGE_URL``" - -msgid "``--disk-format DISK_FORMAT``" -msgstr "``--disk-format DISK_FORMAT``" - -msgid "``--file FILE``" -msgstr "``--file FILE``" - -msgid "``--human-readable``" -msgstr "``--human-readable``" - -msgid "``--is-protected [True|False]``" -msgstr "``--is-protected [True|False]``" - -msgid "``--is-public [True|False]``" -msgstr "``--is-public [True|False]``" - -msgid "``--location IMAGE_URL``" -msgstr "``--location IMAGE_URL``" - -msgid "``--min-disk DISK_GB``" -msgstr "``--min-disk DISK_GB``" - -msgid "``--min-ram DISK_RAM``" -msgstr "``--min-ram DISK_RAM``" - -msgid "``--name NAME``" -msgstr "``--name NAME``" - -msgid "``--owner TENANT_ID --size SIZE``" -msgstr "``--owner TENANT_ID --size SIZE``" - -msgid "``--property KEY=VALUE``" -msgstr "``--property KEY=VALUE``" - -msgid "``--purge-props``" -msgstr "``--purge-props``" - -msgid "``IDENTIFIER``" -msgstr "``IDENTIFIER``" - -msgid "``VOLUME_ID``" -msgstr "``VOLUME_ID``" - -msgid "``barbican`` - Key Manager Service API" -msgstr "``barbican`` - Key Manager Service API" - -msgid "``ceilometer`` - Telemetry API" -msgstr "``ceilometer`` - Telemetry API" - -msgid "``cinder`` - Block Storage API and extensions" -msgstr "``cinder`` - Block Storage API および拡張" - -msgid "``cloudkitty`` - Rating service API" -msgstr "``cloudkitty`` - Rating service API" - -msgid "``designate`` - DNS service API" -msgstr "``designate`` - DNS service API" - -msgid "" -"``filesystem_store_metadata_file = filePath``, where filePath points to a " -"JSON file that defines the mount point for OpenStack images on your system " -"and a unique ID. 
For example:" -msgstr "" -"``filesystem_store_metadata_file = filePath``。filePath は、お使いのシステム" -"の OpenStack イメージのマウントポイントを定義する JSON ファイルおよび一意の " -"ID を参照するようにします。例: " - -msgid "``fuel`` - Deployment service API" -msgstr "``fuel`` - Deployment service API" - -msgid "``glance`` - Image service API" -msgstr "``glance`` - Image service API" - -msgid "``gnocchi`` - Telemetry API v3" -msgstr "``gnocchi`` - Telemetry API v3" - -msgid "``heat-api-cfn`` component" -msgstr "``heat-api-cfn`` コンポーネント" - -msgid "``heat-api`` component" -msgstr "``heat-api`` コンポーネント" - -msgid "``heat-engine``" -msgstr "``heat-engine``" - -msgid "``heat`` - Orchestration API" -msgstr "``heat`` - Orchestration API" - -msgid "``heat`` command-line client" -msgstr "``heat`` コマンドラインクライアント" - -msgid "``keystone`` - Identity service API and extensions" -msgstr "``keystone`` - Identity サービス API および拡張" - -msgid "``magnum`` - Containers service API" -msgstr "``magnum`` - Containers service API" - -msgid "``manila`` - Shared file systems API" -msgstr "``manila`` - Shared file systems API" - -msgid "``mistral`` - Workflow service API" -msgstr "``mistral`` - Workflow service API" - -msgid "``monasca`` - Monitoring API" -msgstr "``monasca`` - Monitoring API" - -msgid "``murano`` - Application catalog API" -msgstr "``murano`` - Application catalog API" - -msgid "``neutron`` - Networking API" -msgstr "``neutron`` - Networking API" - -msgid "``nova-api-metadata`` service" -msgstr "``nova-api-metadata`` サービス" - -msgid "``nova-api`` service" -msgstr "``nova-api`` サービス" - -msgid "``nova-cert`` daemon" -msgstr "``nova-cert`` デーモン" - -msgid "``nova-cert`` module" -msgstr "``nova-cert`` モジュール" - -msgid "``nova-compute`` service" -msgstr "``nova-compute`` サービス" - -msgid "``nova-conductor`` module" -msgstr "``nova-conductor`` モジュール" - -msgid "``nova-consoleauth`` daemon" -msgstr "``nova-consoleauth`` デーモン" - -msgid "``nova-network worker`` daemon" -msgstr "``nova-network worker`` デーモン" - -msgid "``nova-novncproxy`` daemon" -msgstr "``nova-novncproxy`` デーモン" - -msgid "``nova-scheduler`` service" -msgstr "``nova-scheduler`` サービス" - -msgid "``nova-spicehtml5proxy`` daemon" -msgstr "``nova-spicehtml5proxy`` デーモン" - -msgid "``nova-xvpvncproxy`` daemon" -msgstr "``nova-xvpvncproxy`` デーモン" - -msgid "``nova`` - Compute API and extensions" -msgstr "``nova`` - Compute API および拡張" - -msgid "``nova`` client" -msgstr "``nova`` クライアント" - -msgid "``python-troveclient`` command-line client" -msgstr "``python-troveclient`` コマンドラインクライアント" - -msgid "``sahara`` - Data Processing API" -msgstr "``sahara`` - Data Processing API" - -msgid "``senlin`` - Clustering service API" -msgstr "``senlin`` - Clustering service API" - -msgid "``show_multiple_locations = True``" -msgstr "``show_multiple_locations = True``" - -msgid "``swift`` - Object Storage API" -msgstr "``swift`` - Object Storage API" - -msgid "``trove-api`` component" -msgstr "``trove-api`` コンポーネント" - -msgid "``trove-conductor`` service" -msgstr "``trove-conductor`` サービス" - -msgid "``trove-guestagent`` service" -msgstr "``trove-guestagent`` サービス" - -msgid "``trove-taskmanager`` service" -msgstr "``trove-taskmanager`` サービス" - -msgid "``trove`` - Database service API" -msgstr "``trove`` - Database service API" - -msgid "absolute limit" -msgstr "絶対制限" - -msgid "access control list" -msgstr "アクセス制御リスト" - -msgid "access key" -msgstr "アクセスキー" - -msgid "account" -msgstr "アカウント" - -msgid "account auditor" -msgstr "account auditor" - -msgid "account database" -msgstr "アカウントデータベース" - -msgid "account reaper" -msgstr 
"account reaper" - -msgid "account server" -msgstr "account server" - -msgid "account service" -msgstr "account service" - -msgid "accounting" -msgstr "アカウンティング" - -msgid "active/active configuration" -msgstr "アクティブ/アクティブ設定" - -msgid "active/passive configuration" -msgstr "アクティブ/パッシブ設定" - -msgid "address pool" -msgstr "アドレスプール" - -msgid "admin API" -msgstr "管理 API" - -msgid "admin server" -msgstr "管理サーバー" - -msgid "administrator" -msgstr "管理者" - -msgid "alert" -msgstr "アラート" - -msgid "allocate" -msgstr "確保" - -msgid "applet" -msgstr "アプレット" - -msgid "application server" -msgstr "アプリケーションサーバー" - -msgid "arptables" -msgstr "arptables" - -msgid "ask.openstack.org" -msgstr "ask.openstack.org" - -msgid "associate" -msgstr "割り当て" - -msgid "attach" -msgstr "接続" - -msgid "attachment (network)" -msgstr "アタッチ(ネットワーク)" - -msgid "auditing" -msgstr "監査" - -msgid "auditor" -msgstr "auditor" - -msgid "auth node" -msgstr "認可ノード" - -msgid "authentication" -msgstr "認証" - -msgid "authentication token" -msgstr "認証トークン" - -msgid "authorization" -msgstr "認可" - -msgid "authorization node" -msgstr "認可ノード" - -msgid "auto declare" -msgstr "自動宣言" - -msgid "availability zone" -msgstr "アベイラビリティゾーン" - -msgid "back end" -msgstr "バックエンド" - -msgid "back-end catalog" -msgstr "バックエンドカタログ" - -msgid "back-end store" -msgstr "バックエンドストア" - -msgid "backup restore and disaster recovery as a service" -msgstr "backup restore and disaster recovery as a service" - -msgid "bandwidth" -msgstr "帯域" - -msgid "barbican" -msgstr "barbican" - -msgid "bare" -msgstr "bare" - -msgid "base image" -msgstr "ベースイメージ" - -msgid "binary" -msgstr "バイナリ" - -msgid "bit" -msgstr "ビット" - -msgid "bits per second (BPS)" -msgstr "bps" - -msgid "block device" -msgstr "ブロックデバイス" - -msgid "block migration" -msgstr "ブロックマイグレーション" - -msgid "bootable disk image" -msgstr "ブータブルディスクイメージ" - -msgid "browser" -msgstr "ブラウザー" - -msgid "builder file" -msgstr "ビルダーファイル" - -msgid "bursting" -msgstr "超過利用" - -msgid "button class" -msgstr "ボタンクラス" - -msgid "byte" -msgstr "バイト" - -msgid "cache pruner" -msgstr "cache pruner" - -msgid "capability" -msgstr "キャパシティ" - -msgid "capacity cache" -msgstr "capacity cache" - -msgid "capacity updater" -msgstr "capacity updater" - -msgid "catalog" -msgstr "カタログ" - -msgid "catalog service" -msgstr "カタログサービス" - -msgid "ceilometer" -msgstr "ceilometer" - -msgid "cell" -msgstr "セル" - -msgid "cell forwarding" -msgstr "セルフォワーディング" - -msgid "cell manager" -msgstr "セルマネージャー" - -msgid "certificate authority" -msgstr "認証局" - -msgid "chance scheduler" -msgstr "チャンススケジューラー" - -msgid "changes since" -msgstr "changes since" - -msgid "child cell" -msgstr "子セル" - -msgid "cinder" -msgstr "cinder" - -msgid "cinder-api" -msgstr "cinder-api" - -msgid "cinder-backup daemon" -msgstr "cinder-backup デーモン" - -msgid "cinder-scheduler daemon" -msgstr "cinder-scheduler デーモン" - -msgid "cinder-volume" -msgstr "cinder-volume" - -msgid "cinder_img_volume_type" -msgstr "cinder_img_volume_type" - -msgid "cinder_img_volume_type (via glance image metadata)" -msgstr "cinder_img_volume_type (Image サービスのメタデータ経由)" - -msgid "cloud architect" -msgstr "クラウドアーキテクト" - -msgid "cloud computing" -msgstr "クラウドコンピューティング" - -msgid "cloud controller" -msgstr "クラウドコントローラー" - -msgid "cloud controller node" -msgstr "クラウドコントローラーノード" - -msgid "cloud-init" -msgstr "cloud-init" - -msgid "cloudadmin" -msgstr "cloudadmin" - -msgid "cloudkitty" -msgstr "cloudkitty" - -msgid "cloudpipe" -msgstr "cloudpipe" - -msgid "cloudpipe image" -msgstr "cloudpipe イメージ" - -msgid "command filter" -msgstr 
"コマンドフィルター" - -msgid "community project" -msgstr "コミュニティープロジェクト" - -msgid "compression" -msgstr "圧縮" - -msgid "compute controller" -msgstr "コンピュートコントローラー" - -msgid "compute host" -msgstr "コンピュートホスト" - -msgid "compute node" -msgstr "コンピュートノード" - -msgid "compute worker" -msgstr "コンピュートワーカー" - -msgid "concatenated object" -msgstr "連結オブジェクト" - -msgid "conductor" -msgstr "コンダクター" - -msgid "congress" -msgstr "congress" - -msgid "consistency window" -msgstr "一貫性ウインドウ" - -msgid "console log" -msgstr "コンソールログ" - -msgid "container" -msgstr "コンテナー" - -msgid "container auditor" -msgstr "コンテナーオーディター" - -msgid "container database" -msgstr "コンテナーデータベース" - -msgid "container format" -msgstr "コンテナーフォーマット" - -msgid "container server" -msgstr "コンテナーサーバー" - -msgid "container service" -msgstr "コンテナーサービス" - -msgid "content delivery network (CDN)" -msgstr "コンテンツ配信ネットワーク (CDN)" - -msgid "controller node" -msgstr "コントローラーノード" - -msgid "core API" -msgstr "コアAPI" - -msgid "core service" -msgstr "コアサービス" - -msgid "cost" -msgstr "コスト" - -msgid "credentials" -msgstr "クレデンシャル" - -msgid "current workload" -msgstr "カレントワークロード" - -msgid "customer" -msgstr "カスタマー" - -msgid "customization module" -msgstr "カスタムモジュール" - -msgid "daemon" -msgstr "デーモン" - -msgid "data encryption" -msgstr "データ暗号化" - -msgid "data store" -msgstr "データストア" - -msgid "database ID" -msgstr "データベース ID" - -msgid "database replicator" -msgstr "データベースレプリケーター" - -msgid "deallocate" -msgstr "割り当て解除" - -msgid "deduplication" -msgstr "重複排除" - -msgid "default panel" -msgstr "デフォルトパネル" - -msgid "default tenant" -msgstr "デフォルトテナント" - -msgid "default token" -msgstr "デフォルトトークン" - -msgid "default_volume_type (via cinder.conf)" -msgstr "default_volume_type (cinder.conf 経由)" - -msgid "delayed delete" -msgstr "遅延削除" - -msgid "delivery mode" -msgstr "デリバリーモード" - -msgid "denial of service (DoS)" -msgstr "サービス妨害 (DoS)" - -msgid "deprecated auth" -msgstr "非推奨認証" - -msgid "designate" -msgstr "designate" - -msgid "developer" -msgstr "developer" - -msgid "device ID" -msgstr "デバイス ID" - -msgid "device weight" -msgstr "デバイスウェイト" - -msgid "direct consumer" -msgstr "直接使用者" - -msgid "direct exchange" -msgstr "直接交換" - -msgid "direct publisher" -msgstr "直接発行者" - -msgid "disassociate" -msgstr "関連付け解除" - -msgid "disk encryption" -msgstr "ディスク暗号化" - -msgid "disk format" -msgstr "ディスクフォーマット" - -msgid "dispersion" -msgstr "dispersion" - -msgid "distributed virtual router (DVR)" -msgstr "分散仮想ルーター (DVR)" - -msgid "dnsmasq" -msgstr "dnsmasq" - -msgid "domain" -msgstr "ドメイン" - -msgid "download" -msgstr "ダウンロード" - -msgid "durable exchange" -msgstr "永続交換" - -msgid "durable queue" -msgstr "永続キュー" - -msgid "e1000" -msgstr "e1000" - -msgid "east-west traffic" -msgstr "イースト・ウエスト通信" - -msgid "ebtables" -msgstr "ebtables" - -msgid "encapsulation" -msgstr "カプセル化" - -msgid "encryption" -msgstr "暗号化" - -msgid "endpoint" -msgstr "エンドポイント" - -msgid "endpoint registry" -msgstr "エンドポイントレジストリ" - -msgid "endpoint template" -msgstr "エンドポイントテンプレート" - -msgid "entity" -msgstr "エンティティー" - -msgid "ephemeral image" -msgstr "一時イメージ" - -msgid "ephemeral volume" -msgstr "一時ボリューム" - -msgid "euca2ools" -msgstr "euca2ools" - -msgid "evacuate" -msgstr "退避" - -msgid "exchange" -msgstr "交換" - -msgid "exchange type" -msgstr "交換種別" - -msgid "exclusive queue" -msgstr "排他キュー" - -msgid "extended attributes (xattr)" -msgstr "拡張属性 (xattr)" - -msgid "extension" -msgstr "エクステンション" - -msgid "external network" -msgstr "外部ネットワーク" - -msgid "extra specs" -msgstr "拡張仕様" - -msgid "fan-out exchange" -msgstr "ファンアウト交換" - -msgid "federated 
identity" -msgstr "連合認証" - -msgid "fill-first scheduler" -msgstr "充填優先スケジューラー" - -msgid "filter" -msgstr "フィルター" - -msgid "firewall" -msgstr "ファイアウォール" - -msgid "fixed IP address" -msgstr "fixed IP アドレス" - -msgid "flat mode injection" -msgstr "フラットモードインジェクション" - -msgid "flat network" -msgstr "フラットネットワーク" - -msgid "flavor" -msgstr "フレーバー" - -msgid "flavor ID" -msgstr "フレーバー ID" - -msgid "floating IP address" -msgstr "Floating IP アドレス" - -msgid "freezer" -msgstr "freezer" - -msgid "front end" -msgstr "フロントエンド" - -msgid "fuel" -msgstr "fuel" - -msgid "gateway" -msgstr "ゲートウェイ" - -msgid "generic receive offload (GRO)" -msgstr "generic receive offload (GRO)" - -msgid "generic routing encapsulation (GRE)" -msgstr "generic routing encapsulation (GRE)" - -msgid "glance" -msgstr "glance" - -msgid "glance API server" -msgstr "glance API サーバー" - -msgid "glance registry" -msgstr "Glance レジストリ" - -msgid "glance-api" -msgstr "glance-api" - -msgid "glance-registry" -msgstr "glance-registry" - -msgid "global endpoint template" -msgstr "グローバルエンドポイントテンプレート" - -msgid "gnocchi" -msgstr "gnocchi" - -msgid "golden image" -msgstr "ゴールデンイメージ" - -msgid "guest OS" -msgstr "ゲスト OS" - -msgid "handover" -msgstr "handover" - -msgid "hard reboot" -msgstr "ハードリブート" - -msgid "health monitor" -msgstr "ヘルスモニター" - -msgid "heat" -msgstr "heat" - -msgid "high availability (HA)" -msgstr "高可用性" - -msgid "horizon" -msgstr "Horizon" - -msgid "horizon plug-in" -msgstr "horizon プラグイン" - -msgid "host" -msgstr "ホスト" - -msgid "host aggregate" -msgstr "ホストアグリゲート" - -msgid "hybrid cloud" -msgstr "ハイブリッドクラウド" - -msgid "hyperlink" -msgstr "ハイパーリンク" - -msgid "hypervisor" -msgstr "ハイパーバイザー" - -msgid "hypervisor pool" -msgstr "ハイパーバイザープール" - -msgid "iSCSI" -msgstr "iSCSI" - -msgid "" -"iSCSI Qualified Name (IQN) is the format most commonly used for iSCSI names, " -"which uniquely identify nodes in an iSCSI network. All IQNs follow the " -"pattern iqn.yyyy-mm.domain:identifier, where 'yyyy-mm' is the year and month " -"in which the domain was registered, 'domain' is the reversed domain name of " -"the issuing organization, and 'identifier' is an optional string which makes " -"each IQN under the same domain unique. For example, 'iqn.2015-10.org." -"openstack.408ae959bce1'." 
-msgstr "" -"iSCSI Qualified Name (IQN) は iSCSI の名前として最も広く使われている形式で、 " -"iSCSI ネットワークで一意にノードを識別するのに使われます。すべての IQN は " -"iqn.yyyy-mm.domain:identifier という形式です。ここで、 'yyyy-mm' はそのドメイ" -"ンが登録された年と月、 'domain' は発行組織の登録されたドメイン名、 " -"'identifier' は同じドメイン内の各 IQN 番号を一意なものにするためのオプション" -"文字列です。例えば 'iqn.2015-10.org.openstack.408ae959bce1'" - -msgid "ide" -msgstr "ide" - -msgid "identity provider" -msgstr "識別情報プロバイダー" - -msgid "image" -msgstr "イメージ" - -msgid "image ID" -msgstr "イメージ ID" - -msgid "image UUID" -msgstr "イメージ UUID" - -msgid "image cache" -msgstr "イメージキャッシュ" - -msgid "image membership" -msgstr "イメージメンバーシップ" - -msgid "image owner" -msgstr "イメージ所有者" - -msgid "image registry" -msgstr "イメージレジストリー" - -msgid "image status" -msgstr "イメージ状態" - -msgid "image store" -msgstr "イメージストア" - -msgid "incubated project" -msgstr "インキュベートプロジェクト" - -msgid "ingress filtering" -msgstr "イングレスフィルタリング" - -msgid "injection" -msgstr "インジェクション" - -msgid "instance" -msgstr "インスタンス" - -msgid "instance ID" -msgstr "インスタンス ID" - -msgid "instance UUID" -msgstr "インスタンス UUID" - -msgid "instance state" -msgstr "インスタンス状態" - -msgid "instance tunnels network" -msgstr "インスタンストンネルネットワーク" - -msgid "instance type" -msgstr "インスタンスタイプ" - -msgid "instance type ID" -msgstr "インスタンスタイプ ID" - -msgid "interface" -msgstr "インターフェース" - -msgid "interface ID" -msgstr "インターフェース ID" - -msgid "ip6tables" -msgstr "ip6tables" - -msgid "ipset" -msgstr "ipset" - -msgid "iptables" -msgstr "iptables" - -msgid "ironic" -msgstr "ironic" - -msgid "itsec" -msgstr "itsec" - -msgid "jumbo frame" -msgstr "ジャンボフレーム" - -msgid "kernel-based VM (KVM)" -msgstr "kernel-based VM (KVM)" - -msgid "keystone" -msgstr "keystone" - -msgid "large object" -msgstr "ラージオブジェクト" - -msgid "libvirt" -msgstr "libvirt" - -msgid "libvirt for KVM or QEMU" -msgstr "KVM/QEMU 向けの libvirt" - -msgid "libvirt\\_type setting" -msgstr "libvirt\\_type 設定" - -msgid "live migration" -msgstr "ライブマイグレーション" - -msgid "load balancer" -msgstr "負荷分散装置" - -msgid "load balancing" -msgstr "負荷分散" - -msgid "magnum" -msgstr "magnum" - -msgid "management API" -msgstr "マネジメント API" - -msgid "management network" -msgstr "管理ネットワーク" - -msgid "manager" -msgstr "マネージャー" - -msgid "manifest" -msgstr "マニフェスト" - -msgid "manifest object" -msgstr "マニフェストオブジェクト" - -msgid "manila" -msgstr "manila" - -msgid "manila-api" -msgstr "manila-api" - -msgid "manila-scheduler" -msgstr "manila-scheduler" - -msgid "manila-share" -msgstr "manila-share" - -msgid "maximum transmission unit (MTU)" -msgstr "最大転送単位 (MTU)" - -msgid "mechanism driver" -msgstr "メカニズムドライバー" - -msgid "melange" -msgstr "melange" - -msgid "membership" -msgstr "メンバーシップ" - -msgid "membership list" -msgstr "メンバーシップリスト" - -msgid "memcached" -msgstr "memcached" - -msgid "memory overcommit" -msgstr "メモリーオーバーコミット" - -msgid "message broker" -msgstr "メッセージブローカー" - -msgid "message bus" -msgstr "メッセージバス" - -msgid "message queue" -msgstr "メッセージキュー" - -msgid "migration" -msgstr "マイグレーション" - -msgid "mistral" -msgstr "mistral" - -msgid "monasca" -msgstr "monasca" - -msgid "multi-factor authentication" -msgstr "多要素認証" - -msgid "multi-host" -msgstr "マルチホスト" - -msgid "multinic" -msgstr "マルチ NIC" - -msgid "murano" -msgstr "murano" - -msgid "ne2k\\_pci" -msgstr "ne2k\\_pci" - -msgid "netadmin" -msgstr "netadmin" - -msgid "netfront" -msgstr "netfront" - -msgid "network" -msgstr "Network" - -msgid "network ID" -msgstr "ネットワーク ID" - -msgid "network UUID" -msgstr "ネットワーク UUID" - -msgid "network controller" -msgstr "ネットワークコントローラー" - -msgid "network manager" -msgstr "ネットワークマネージャー" - -msgid 
"network namespace" -msgstr "ネットワーク名前空間" - -msgid "network node" -msgstr "ネットワークノード" - -msgid "network segment" -msgstr "ネットワークセグメント" - -msgid "network worker" -msgstr "ネットワークワーカー" - -msgid "neutron" -msgstr "neutron" - -msgid "neutron API" -msgstr "neutron API" - -msgid "neutron manager" -msgstr "neutron マネージャー" - -msgid "neutron plug-in" -msgstr "neutron プラグイン" - -msgid "neutron-server" -msgstr "neutron-server" - -msgid "node" -msgstr "node" - -msgid "non-durable exchange" -msgstr "非永続交換" - -msgid "non-durable queue" -msgstr "非永続キュー" - -msgid "non-persistent volume" -msgstr "非永続ボリューム" - -msgid "north-south traffic" -msgstr "ノース・サウス通信" - -msgid "nova" -msgstr "nova" - -msgid "nova-network" -msgstr "nova-network" - -msgid "object" -msgstr "オブジェクト" - -msgid "object auditor" -msgstr "オブジェクトオーディター" - -msgid "object expiration" -msgstr "オブジェクト有効期限" - -msgid "object hash" -msgstr "オブジェクトハッシュ" - -msgid "object path hash" -msgstr "オブジェクトパスハッシュ" - -msgid "object replicator" -msgstr "オブジェクトレプリケーター" - -msgid "object server" -msgstr "オブジェクトサーバー" - -msgid "object versioning" -msgstr "オブジェクトバージョニング" - -msgid "openSUSE" -msgstr "openSUSE" - -msgid "operator" -msgstr "運用者" - -msgid "optional service" -msgstr "オプションサービス" - -msgid "orphan" -msgstr "orphan" - -msgid "parent cell" -msgstr "親セル" - -msgid "partition" -msgstr "パーティション" - -msgid "partition index" -msgstr "パーティションインデックス" - -msgid "partition shift value" -msgstr "パーティションシフト値" - -msgid "path MTU discovery (PMTUD)" -msgstr "path MTU discovery (PMTUD)" - -msgid "pause" -msgstr "一時停止" - -msgid "pcnet" -msgstr "pcnet" - -msgid "persistent message" -msgstr "永続メッセージ" - -msgid "persistent volume" -msgstr "永続ボリューム" - -msgid "personality file" -msgstr "パーソナリティーファイル" - -msgid "pip package" -msgstr "pip パッケージ" - -msgid "plug-in" -msgstr "プラグイン" - -msgid "policy service" -msgstr "ポリシーサービス" - -msgid "pool" -msgstr "プール" - -msgid "pool member" -msgstr "プールメンバー" - -msgid "port" -msgstr "ポート" - -msgid "port UUID" -msgstr "ポート UUID" - -msgid "preseed" -msgstr "preseed" - -msgid "private IP address" -msgstr "プライベート IP アドレス" - -msgid "private image" -msgstr "プライベートイメージ" - -msgid "private network" -msgstr "プライベートネットワーク" - -msgid "project" -msgstr "プロジェクト" - -msgid "project ID" -msgstr "プロジェクト ID" - -msgid "project VPN" -msgstr "プロジェクト VPN" - -msgid "promiscuous mode" -msgstr "プロミスキャスモード" - -msgid "protected property" -msgstr "保護プロパティー" - -msgid "provider" -msgstr "プロバイダー" - -msgid "proxy node" -msgstr "プロキシノード" - -msgid "proxy server" -msgstr "プロキシサーバー" - -msgid "public API" -msgstr "パブリック API" - -msgid "public IP address" -msgstr "パブリック IP アドレス" - -msgid "public image" -msgstr "パブリックイメージ" - -msgid "public key authentication" -msgstr "公開鍵認証" - -msgid "public network" -msgstr "パブリックネットワーク" - -msgid "python-barbicanclient" -msgstr "python-barbicanclient" - -msgid "python-ceilometerclient" -msgstr "python-ceilometerclient" - -msgid "python-cinderclient" -msgstr "python-cinderclient" - -msgid "python-cloudkittyclient" -msgstr "python-cloudkittyclient" - -msgid "python-fuelclient" -msgstr "python-fuelclient" - -msgid "python-glanceclient" -msgstr "python-glanceclient" - -msgid "python-gnocchiclient" -msgstr "python-gnocchiclient" - -msgid "python-heatclient" -msgstr "python-heatclient" - -msgid "python-keystoneclient" -msgstr "python-keystoneclient" - -msgid "python-magnumclient" -msgstr "python-magnumclient" - -msgid "python-manilaclient" -msgstr "python-manilaclient" - -msgid "python-mistralclient" -msgstr "python-mistralclient" - -msgid "python-monascaclient" -msgstr 
"python-monascaclient" - -msgid "python-muranoclient" -msgstr "python-muranoclient" - -msgid "python-neutronclient" -msgstr "python-neutronclient" - -msgid "python-novaclient" -msgstr "python-novaclient" - -msgid "python-saharaclient" -msgstr "python-saharaclient" - -msgid "python-senlinclient" -msgstr "python-senlinclient" - -msgid "python-swiftclient" -msgstr "python-swiftclient" - -msgid "python-troveclient" -msgstr "python-troveclient" - -msgid "qemu or kvm" -msgstr "qemu または kvm" - -msgid "quarantine" -msgstr "隔離" - -msgid "quota" -msgstr "クォータ" - -msgid "radvd" -msgstr "radvd" - -msgid "rally" -msgstr "rally" - -msgid "rate limit" -msgstr "レートリミット" - -msgid "raw" -msgstr "raw" - -msgid "rebalance" -msgstr "リバランス" - -msgid "reboot" -msgstr "リブート" - -msgid "rebuild" -msgstr "リビルド" - -msgid "record" -msgstr "レコード" - -msgid "record ID" -msgstr "レコード ID" - -msgid "reference architecture" -msgstr "リファレンスアーキテクチャー" - -msgid "region" -msgstr "リージョン" - -msgid "registry" -msgstr "レジストリー" - -msgid "registry server" -msgstr "レジストリサーバー" - -msgid "replica" -msgstr "レプリカ" - -msgid "replica count" -msgstr "レプリカ数" - -msgid "replication" -msgstr "レプリケーション" - -msgid "replicator" -msgstr "レプリケーター" - -msgid "request ID" -msgstr "リクエスト ID" - -msgid "rescue image" -msgstr "レスキューイメージ" - -msgid "resize" -msgstr "リサイズ" - -msgid "ring" -msgstr "リング" - -msgid "ring builder" -msgstr "リングビルダー" - -msgid "role" -msgstr "ロール" - -msgid "role ID" -msgstr "ロール ID" - -msgid "rootwrap" -msgstr "rootwrap" - -msgid "round-robin scheduler" -msgstr "ラウンドロビンスケジューラー" - -msgid "router" -msgstr "ルーター" - -msgid "routing key" -msgstr "ルーティングキー" - -msgid "rsync" -msgstr "rsync" - -msgid "rtl8139" -msgstr "rtl8139" - -msgid "sahara" -msgstr "sahara" - -msgid "scheduler manager" -msgstr "スケジューラーマネージャー" - -msgid "scoped token" -msgstr "スコープ付きトークン" - -msgid "scrubber" -msgstr "スクラバー" - -msgid "scsi" -msgstr "scsi" - -msgid "secret key" -msgstr "シークレットキー" - -msgid "secure shell (SSH)" -msgstr "secure shell (SSH)" - -msgid "security group" -msgstr "セキュリティーグループ" - -msgid "segmented object" -msgstr "分割オブジェクト" - -msgid "self-service" -msgstr "セルフサービス" - -msgid "senlin" -msgstr "senlin" - -msgid "server" -msgstr "サーバー" - -msgid "server UUID" -msgstr "サーバー UUID" - -msgid "server image" -msgstr "サーバーイメージ" - -msgid "service" -msgstr "サービス" - -msgid "service ID" -msgstr "サービス ID" - -msgid "service catalog" -msgstr "サービスカタログ" - -msgid "service provider" -msgstr "サービスプロバイダー" - -msgid "service registration" -msgstr "サービス登録" - -msgid "service tenant" -msgstr "サービステナント" - -msgid "service token" -msgstr "サービストークン" - -msgid "session back end" -msgstr "セッションバックエンド" - -msgid "session persistence" -msgstr "セッション持続性" - -msgid "session storage" -msgstr "セッションストレージ" - -msgid "setuptools package" -msgstr "setuptools パッケージ" - -msgid "share" -msgstr "共有" - -msgid "share network" -msgstr "ネットワーク共有 (share network)" - -msgid "shared IP address" -msgstr "共有 IP アドレス" - -msgid "shared IP group" -msgstr "共有 IP グループ" - -msgid "shared storage" -msgstr "共有ストレージ" - -msgid "snapshot" -msgstr "スナップショット" - -msgid "soft reboot" -msgstr "ソフトリブート" - -msgid "solum" -msgstr "solum" - -msgid "spread-first scheduler" -msgstr "分散優先スケジューラー" - -msgid "stack" -msgstr "スタック" - -msgid "static IP address" -msgstr "静的 IP アドレス" - -msgid "storage back end" -msgstr "ストレージバックエンド" - -msgid "storage manager" -msgstr "ストレージマネージャー" - -msgid "storage manager back end" -msgstr "ストレージマネージャーバックエンド" - -msgid "storage node" -msgstr "ストレージノード" - -msgid "storage services" -msgstr "ストレージサービス" - -msgid 
"strategy" -msgstr "ストラテジー" - -msgid "subdomain" -msgstr "サブドメイン" - -msgid "subnet" -msgstr "サブネット" - -msgid "suspend" -msgstr "休止" - -msgid "swap" -msgstr "スワップ" - -msgid "swauth" -msgstr "swauth" - -msgid "swift" -msgstr "swift" - -msgid "swift All in One (SAIO)" -msgstr "swift All in One (SAIO)" - -msgid "swift client" -msgstr "swift クライアント" - -msgid "swift middleware" -msgstr "swift ミドルウェア" - -msgid "swift proxy server" -msgstr "swift プロキシサーバー" - -msgid "swift storage node" -msgstr "swift ストレージノード" - -msgid "swift-init" -msgstr "swift-init" - -msgid "swift-recon" -msgstr "swift-recon" - -msgid "swift-ring-builder" -msgstr "swift-ring-builder" - -msgid "sync point" -msgstr "同期ポイント" - -msgid "sysadmin" -msgstr "sysadmin" - -msgid "system usage" -msgstr "システム使用状況" - -msgid "tenant" -msgstr "テナント" - -msgid "tenant ID" -msgstr "テナント ID" - -msgid "tenant endpoint" -msgstr "テナントエンドポイント" - -msgid "token" -msgstr "トークン" - -msgid "token services" -msgstr "トークンサービス" - -msgid "tombstone" -msgstr "tombstone" - -msgid "topic publisher" -msgstr "トピック発行者" - -msgid "transaction ID" -msgstr "トランザクション ID" - -msgid "transient" -msgstr "一時" - -msgid "transient exchange" -msgstr "一時交換" - -msgid "transient message" -msgstr "一時メッセージ" - -msgid "transient queue" -msgstr "一時キュー" - -msgid "trove" -msgstr "trove" - -msgid "unscoped token" -msgstr "スコープなしトークン" - -msgid "updater" -msgstr "アップデーター" - -msgid "user" -msgstr "ユーザー" - -msgid "user data" -msgstr "ユーザーデータ" - -msgid "vSphere" -msgstr "vSphere" - -msgid "virtio" -msgstr "virtio" - -msgid "virtual IP" -msgstr "仮想 IP" - -msgid "virtual VLAN" -msgstr "仮想 VLAN" - -msgid "virtual machine (VM)" -msgstr "仮想マシン (VM)" - -msgid "virtual network" -msgstr "仮想ネットワーク" - -msgid "virtual networking" -msgstr "仮想ネットワーク" - -msgid "virtual port" -msgstr "仮想ポート" - -msgid "virtual private network (VPN)" -msgstr "仮想プライベートネットワーク (VPN)" - -msgid "virtual server" -msgstr "仮想サーバー" - -msgid "virtual switch (vSwitch)" -msgstr "仮想スイッチ (vSwitch)" - -msgid "vmware" -msgstr "vmware" - -msgid "volume" -msgstr "ボリューム" - -msgid "volume ID" -msgstr "ボリューム ID" - -msgid "volume controller" -msgstr "ボリュームコントローラー" - -msgid "volume driver" -msgstr "ボリュームドライバー" - -msgid "volume manager" -msgstr "ボリュームマネージャー" - -msgid "volume node" -msgstr "ボリュームノード" - -msgid "volume plug-in" -msgstr "ボリュームプラグイン" - -msgid "volume worker" -msgstr "ボリュームワーカー" - -msgid "volume_type" -msgstr "volume_type" - -msgid "weight" -msgstr "ウェイト" - -msgid "weighted cost" -msgstr "重み付けコスト" - -msgid "weighting" -msgstr "重み付け" - -msgid "worker" -msgstr "ワーカー" - -msgid "x509 certificates." 
-msgstr "x509 証明書。" - -msgid "xen" -msgstr "xen" - -msgid "zaqar" -msgstr "zaqar" diff --git a/doc/ha-guide/setup.cfg b/doc/ha-guide/setup.cfg deleted file mode 100644 index 0d59cf74..00000000 --- a/doc/ha-guide/setup.cfg +++ /dev/null @@ -1,30 +0,0 @@ -[metadata] -name = openstackhaguide -summary = OpenStack High Availability Guide -author = OpenStack -author-email = openstack-docs@lists.openstack.org -home-page = http://docs.openstack.org/ -classifier = -Environment :: OpenStack -Intended Audience :: Information Technology -Intended Audience :: System Administrators -License :: OSI Approved :: Apache Software License -Operating System :: POSIX :: Linux -Topic :: Documentation - -[global] -setup-hooks = - pbr.hooks.setup_hook - -[files] - -[build_sphinx] -all_files = 1 -build-dir = build -source-dir = source - -[wheel] -universal = 1 - -[pbr] -warnerrors = True diff --git a/doc/ha-guide/setup.py b/doc/ha-guide/setup.py deleted file mode 100644 index 73637574..00000000 --- a/doc/ha-guide/setup.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) diff --git a/doc/ha-guide/source/common b/doc/ha-guide/source/common deleted file mode 120000 index dc879abe..00000000 --- a/doc/ha-guide/source/common +++ /dev/null @@ -1 +0,0 @@ -../../common \ No newline at end of file diff --git a/doc/ha-guide/source/compute-node-ha-api.rst b/doc/ha-guide/source/compute-node-ha-api.rst deleted file mode 100644 index 78888ac6..00000000 --- a/doc/ha-guide/source/compute-node-ha-api.rst +++ /dev/null @@ -1,12 +0,0 @@ - -============================================ -Configure high availability on compute nodes -============================================ - -The `Installation Guide -`_ -gives instructions for installing multiple compute nodes. -To make them highly available, -you must configure the environment -to include multiple instances of the API -and other services. diff --git a/doc/ha-guide/source/compute-node-ha.rst b/doc/ha-guide/source/compute-node-ha.rst deleted file mode 100644 index 9f0c98d2..00000000 --- a/doc/ha-guide/source/compute-node-ha.rst +++ /dev/null @@ -1,10 +0,0 @@ - -================================================== -Configuring the compute node for high availability -================================================== - -.. 
toctree:: - :maxdepth: 2 - - compute-node-ha-api.rst - diff --git a/doc/ha-guide/source/conf.py b/doc/ha-guide/source/conf.py deleted file mode 100644 index a0542988..00000000 --- a/doc/ha-guide/source/conf.py +++ /dev/null @@ -1,289 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os -# import sys - -import openstackdocstheme - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [] - -# Add any paths that contain templates here, relative to this directory. -# templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'High Availability Guide' -bug_tag = u'ha-guide' -copyright = u'2015, OpenStack contributors' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '0.0.1' -# The full version, including alpha/beta/rc tags. -release = '0.0.1' - -# A few variables have to be set for the log-a-bug feature. -# giturl: The location of conf.py on Git. Must be set manually. -# gitsha: The SHA checksum of the bug description. Automatically extracted from git log. -# bug_tag: Tag for categorizing the bug. Must be set manually. -# These variables are passed to the logabug code via html_context. -giturl = u'http://git.openstack.org/cgit/openstack/ha-guide/tree/doc/ha-guide/source' -git_cmd = "/usr/bin/git log | head -n1 | cut -f2 -d' '" -gitsha = os.popen(git_cmd).read().strip('\n') -html_context = {"gitsha": gitsha, "bug_tag": bug_tag, - "giturl": giturl} - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. 
-# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -html_theme_path = [openstackdocstheme.get_html_theme_path()] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = [] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# So that we can enable "log-a-bug" links from each output HTML page, this -# variable must be set to a format that includes year, month, day, hours and -# minutes. -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -html_use_index = False - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. 
-html_show_sourcelink = False
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-# html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-# html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-# html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-# html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'ha-guide'
-
-# If true, publish source files
-html_copy_source = False
-
-# -- Options for LaTeX output ---------------------------------------------
-
-latex_elements = {
-    # The paper size ('letterpaper' or 'a4paper').
-    # 'papersize': 'letterpaper',
-
-    # The font size ('10pt', '11pt' or '12pt').
-    # 'pointsize': '10pt',
-
-    # Additional stuff for the LaTeX preamble.
-    # 'preamble': '',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title,
-#  author, documentclass [howto, manual, or own class]).
-latex_documents = [
-    ('index', 'HAGuide.tex', u'High Availability Guide',
-     u'OpenStack contributors', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-# latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-# latex_use_parts = False
-
-# If true, show page references after internal links.
-# latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-# latex_show_urls = False
-
-# Documents to append as an appendix to all manuals.
-# latex_appendices = []
-
-# If false, no module index is generated.
-# latex_domain_indices = True
-
-
-# -- Options for manual page output ---------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
-    ('index', 'haguide', u'High Availability Guide',
-     [u'OpenStack contributors'], 1)
-]
-
-# If true, show URL addresses after external links.
-# man_show_urls = False
-
-
-# -- Options for Texinfo output -------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-#  dir menu entry, description, category)
-texinfo_documents = [
-    ('index', 'HAGuide', u'High Availability Guide',
-     u'OpenStack contributors', 'HAGuide',
-     'This guide shows OpenStack operators and deployers how to configure '
-     'OpenStack Networking to be robust and fault-tolerant.', 'Miscellaneous'),
-]
-
-# Documents to append as an appendix to all manuals.
-# texinfo_appendices = []
-
-# If false, no module index is generated.
-# texinfo_domain_indices = True
-
-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-# texinfo_show_urls = 'footnote'
-
-# If true, do not generate a @detailmenu in the "Top" node's menu.
-# texinfo_no_detailmenu = False
-
-# -- Options for Internationalization output ------------------------------
-locale_dirs = ['locale/']
diff --git a/doc/ha-guide/source/controller-ha-galera-config.rst b/doc/ha-guide/source/controller-ha-galera-config.rst
deleted file mode 100644
index e3bc19fc..00000000
--- a/doc/ha-guide/source/controller-ha-galera-config.rst
+++ /dev/null
@@ -1,396 +0,0 @@
-Configuration
-==============
-
-Before you launch Galera Cluster, you need to configure the server
-and the database to operate as part of the cluster.
-
-Configuring the server
-~~~~~~~~~~~~~~~~~~~~~~~
-
-Certain services running on the underlying operating system of your
-OpenStack database may block Galera Cluster from normal operation
-or prevent ``mysqld`` from achieving network connectivity with the cluster.
-
-Firewall
---------
-
-Galera Cluster requires that you open four ports to network traffic:
-
-- On ``3306``, Galera Cluster uses TCP for database client connections
-  and for State Snapshot Transfer methods that require the database
-  client (for example, ``mysqldump``).
-- On ``4567``, Galera Cluster uses TCP for replication traffic. Multicast
-  replication uses both TCP and UDP on this port.
-- On ``4568``, Galera Cluster uses TCP for Incremental State Transfers.
-- On ``4444``, Galera Cluster uses TCP for all other State Snapshot Transfer
-  methods.
-
-.. seealso:: For more information on firewalls, see `Firewalls and default
-   ports`_ in the Configuration Reference.
-
-``iptables``
-^^^^^^^^^^^^
-
-For many Linux distributions, you can configure the firewall using
-the ``iptables`` utility. To do so, complete the following steps:
-
-#. For each cluster node, run the following commands, replacing
-   ``NODE-IP-ADDRESS`` with the IP address of the cluster node
-   you want to open the firewall to:
-
-   .. code-block:: console
-
-      # iptables --append INPUT --in-interface eth0 \
-        --protocol tcp --match tcp --dport 3306 \
-        --source NODE-IP-ADDRESS --jump ACCEPT
-      # iptables --append INPUT --in-interface eth0 \
-        --protocol tcp --match tcp --dport 4567 \
-        --source NODE-IP-ADDRESS --jump ACCEPT
-      # iptables --append INPUT --in-interface eth0 \
-        --protocol tcp --match tcp --dport 4568 \
-        --source NODE-IP-ADDRESS --jump ACCEPT
-      # iptables --append INPUT --in-interface eth0 \
-        --protocol tcp --match tcp --dport 4444 \
-        --source NODE-IP-ADDRESS --jump ACCEPT
-
-   In the event that you also want to configure multicast replication,
-   run this command as well:
-
-   .. code-block:: console
-
-      # iptables --append INPUT --in-interface eth0 \
-        --protocol udp --match udp --dport 4567 \
-        --source NODE-IP-ADDRESS --jump ACCEPT
-
-#. Make the changes persistent. For servers that use ``init``, use
-   the :command:`save` command:
-
-   .. code-block:: console
-
-      # service iptables save
-
-   For servers that use ``systemd``, you need to save the current packet
-   filtering to the path of the file that ``iptables`` reads when it starts.
-   This path can vary by distribution, but common locations are in the
-   ``/etc`` directory, such as:
-
-   - ``/etc/sysconfig/iptables``
-   - ``/etc/iptables/iptables.rules``
-
-   When you find the correct path, run the :command:`iptables-save` command:
-
-   .. code-block:: console
-
-      # iptables-save > /etc/sysconfig/iptables
-
-With the firewall configuration saved, these rules are restored
-whenever your OpenStack database server starts.
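-Before moving on, it can be worth confirming that the rules were applied
-as intended. The check below is an illustrative sketch rather than part
-of the official procedure; it assumes the rules were appended to the
-``INPUT`` chain as shown above:
-
-.. code-block:: console
-
-   # iptables --list INPUT --numeric | grep -E '3306|4567|4568|4444'
-
-Each of the four Galera Cluster ports should appear once for every
-cluster node that you opened the firewall to.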
-``firewall-cmd``
-^^^^^^^^^^^^^^^^
-
-For many Linux distributions, you can configure the firewall using the
-``firewall-cmd`` utility for FirewallD. To do so, complete the following
-steps on each cluster node:
-
-#. Add the Galera Cluster service:
-
-   .. code-block:: console
-
-      # firewall-cmd --add-service=mysql
-
-#. For each instance of OpenStack database in your cluster, open the
-   ports that Galera Cluster uses:
-
-   .. code-block:: console
-
-      # firewall-cmd --add-port=3306/tcp
-      # firewall-cmd --add-port=4567/tcp
-      # firewall-cmd --add-port=4568/tcp
-      # firewall-cmd --add-port=4444/tcp
-
-   In the event that you also want to configure multicast replication,
-   run this command as well:
-
-   .. code-block:: console
-
-      # firewall-cmd --add-port=4567/udp
-
-#. To make this configuration persistent, repeat the above commands
-   with the :option:`--permanent` option:
-
-   .. code-block:: console
-
-      # firewall-cmd --add-service=mysql --permanent
-      # firewall-cmd --add-port=3306/tcp --permanent
-      # firewall-cmd --add-port=4567/tcp --permanent
-      # firewall-cmd --add-port=4568/tcp --permanent
-      # firewall-cmd --add-port=4444/tcp --permanent
-      # firewall-cmd --add-port=4567/udp --permanent
-
-With the firewall configuration saved, these rules are applied
-whenever your OpenStack database server starts.
-
-SELinux
--------
-
-Security-Enhanced Linux is a kernel module for improving security on Linux
-operating systems. It is commonly enabled and configured by default on
-Red Hat-based distributions. In the context of Galera Cluster, systems with
-SELinux may block the database service, keep it from starting, or prevent it
-from establishing network connections with the cluster.
-
-To configure SELinux to permit Galera Cluster to operate, complete
-the following steps on each cluster node:
-
-#. Using the ``semanage`` utility, open the relevant ports:
-
-   .. code-block:: console
-
-      # semanage port -a -t mysqld_port_t -p tcp 3306
-      # semanage port -a -t mysqld_port_t -p tcp 4567
-      # semanage port -a -t mysqld_port_t -p tcp 4568
-      # semanage port -a -t mysqld_port_t -p tcp 4444
-
-   In the event that you use multicast replication, you also need to
-   open ``4567`` to UDP traffic:
-
-   .. code-block:: console
-
-      # semanage port -a -t mysqld_port_t -p udp 4567
-
-#. Set SELinux to allow the database server to run:
-
-   .. code-block:: console
-
-      # semanage permissive -a mysqld_t
-
-With these options set, SELinux now permits Galera Cluster to operate.
-
-.. note:: Bear in mind that leaving SELinux in permissive mode is not a good
-   security practice. Over the longer term, you need to develop a
-   security policy for Galera Cluster and then switch SELinux back
-   into enforcing mode.
-
-   For more information on configuring SELinux to work with
-   Galera Cluster, see the `Documentation`_.
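-As a quick sanity check, you can confirm that the new port contexts took
-effect by listing the ports labeled for ``mysqld``; this is illustrative
-only and not part of the official procedure:
-
-.. code-block:: console
-
-   # semanage port -l | grep mysqld_port_t
-
-The output should list TCP entries for ``3306``, ``4567``, ``4568``, and
-``4444``, plus a UDP entry for ``4567`` if you use multicast replication.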
-AppArmor
---------
-
-Application Armor is a kernel module for improving security on Linux
-operating systems. It is developed by Canonical and commonly used on
-Ubuntu-based distributions. In the context of Galera Cluster, systems
-with AppArmor may block the database service from operating normally.
-
-To configure AppArmor to work with Galera Cluster, complete the
-following steps on each cluster node:
-
-#. Create a symbolic link for the database server profile in the
-   ``disable`` directory:
-
-   .. code-block:: console
-
-      # ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
-
-#. Restart AppArmor. For servers that use ``init``, run the following
-   command:
-
-   .. code-block:: console
-
-      # service apparmor restart
-
-   For servers that use ``systemd``, instead run this command:
-
-   .. code-block:: console
-
-      # systemctl restart apparmor
-
-AppArmor now permits Galera Cluster to operate.
-
-Database configuration
-~~~~~~~~~~~~~~~~~~~~~~
-
-MySQL databases, including MariaDB and Percona XtraDB, manage their
-configurations using a ``my.cnf`` file, which is typically located in the
-``/etc`` directory. Configuration options available in these databases are
-also available in Galera Cluster, with some restrictions and several
-additions.
-
-.. code-block:: ini
-
-   [mysqld]
-   datadir=/var/lib/mysql
-   socket=/var/lib/mysql/mysql.sock
-   user=mysql
-   binlog_format=ROW
-   bind-address=0.0.0.0
-
-   # InnoDB Configuration
-   default_storage_engine=innodb
-   innodb_autoinc_lock_mode=2
-   innodb_flush_log_at_trx_commit=0
-   innodb_buffer_pool_size=122M
-
-   # Galera Cluster Configuration
-   wsrep_provider=/usr/lib/libgalera_smm.so
-   wsrep_provider_options="pc.recovery=TRUE;gcache.size=300M"
-   wsrep_cluster_name="my_example_cluster"
-   wsrep_cluster_address="gcomm://GALERA1-IP,GALERA2-IP,GALERA3-IP"
-   wsrep_sst_method=rsync
-
-Configuring ``mysqld``
-----------------------
-
-While all of the configuration parameters available to the standard MySQL,
-MariaDB, or Percona XtraDB database server are also available in Galera
-Cluster, there are some that you must define from the outset to avoid
-conflicts or unexpected behavior.
-
-- Ensure that the database server is not bound only to the localhost
-  address, ``127.0.0.1``. Instead, bind it to ``0.0.0.0`` to ensure it
-  listens on all available interfaces:
-
-  .. code-block:: ini
-
-     bind-address=0.0.0.0
-
-- Ensure that the binary log format is set to use row-level replication,
-  as opposed to statement-level replication:
-
-  .. code-block:: ini
-
-     binlog_format=ROW
-
-Configuring InnoDB
-------------------
-
-Galera Cluster does not support non-transactional storage engines and
-requires that you use InnoDB by default. There are some additional
-parameters that you must define to avoid conflicts:
-
-- Ensure that the default storage engine is set to InnoDB:
-
-  .. code-block:: ini
-
-     default_storage_engine=InnoDB
-
-- Ensure that the InnoDB locking mode for generating auto-increment values
-  is set to ``2``, which is the interleaved locking mode:
-
-  .. code-block:: ini
-
-     innodb_autoinc_lock_mode=2
-
-  Do not change this value. Other modes may cause ``INSERT`` statements
-  on tables with auto-increment columns to fail, as well as unresolved
-  deadlocks that leave the system unresponsive.
-
-- Ensure that the InnoDB log buffer is written to file once per second,
-  rather than on each commit, to improve performance:
-
-  .. code-block:: ini
-
-     innodb_flush_log_at_trx_commit=0
-
-  Bear in mind that while setting this parameter to ``0`` or ``2`` can
-  improve performance, it introduces certain dangers. Operating system
-  failures can erase the last second of transactions. While you can
-  recover this data from another node, if the cluster goes down at the
-  same time (in the event of a data center power outage), you lose this
-  data permanently.
-
-- Define the InnoDB memory buffer pool size. The default value is 128 MB,
-  but to compensate for Galera Cluster's additional memory usage, scale
-  your usual value back by 5%:
-
-  .. code-block:: ini
-
-     innodb_buffer_pool_size=122M
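-Once the database server is running with these settings, you can confirm
-that they took effect from the database client. This query is an
-illustrative sanity check, not part of the official procedure:
-
-.. code-block:: mysql
-
-   SHOW VARIABLES WHERE Variable_name IN
-   ('default_storage_engine', 'innodb_autoinc_lock_mode',
-    'innodb_flush_log_at_trx_commit', 'innodb_buffer_pool_size');
-
-If any row differs from the value in ``my.cnf``, check that you edited
-the configuration file that your distribution actually reads.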
-Configuring wsrep replication
------------------------------
-
-Galera Cluster configuration parameters all have the ``wsrep_`` prefix.
-There are five that you must define for each cluster node in your
-OpenStack database:
-
-- **wsrep Provider**: The Galera Replication Plugin serves as the wsrep
-  provider for Galera Cluster. It is installed on your system as the
-  ``libgalera_smm.so`` file. You must define the path to this file in
-  your ``my.cnf``:
-
-  .. code-block:: ini
-
-     wsrep_provider="/usr/lib/libgalera_smm.so"
-
-- **Cluster Name**: Define an arbitrary name for your cluster:
-
-  .. code-block:: ini
-
-     wsrep_cluster_name="my_example_cluster"
-
-  You must use the same name on every cluster node. The connection fails
-  when this value does not match.
-
-- **Cluster Address**: List the IP addresses for each cluster node:
-
-  .. code-block:: ini
-
-     wsrep_cluster_address="gcomm://192.168.1.1,192.168.1.2,192.168.1.3"
-
-  Replace the IP addresses given here with a comma-separated list of the
-  IP addresses of each OpenStack database server in your cluster.
-
-- **Node Name**: Define the logical name of the cluster node:
-
-  .. code-block:: ini
-
-     wsrep_node_name="Galera1"
-
-- **Node Address**: Define the IP address of the cluster node:
-
-  .. code-block:: ini
-
-     wsrep_node_address="192.168.1.1"
-
-Additional parameters
-^^^^^^^^^^^^^^^^^^^^^
-
-For a complete list of the available parameters, run the
-``SHOW VARIABLES`` command from within the database client:
-
-.. code-block:: mysql
-
-   SHOW VARIABLES LIKE 'wsrep_%';
-
-   +------------------------------+-------+
-   | Variable_name                | Value |
-   +------------------------------+-------+
-   | wsrep_auto_increment_control | ON    |
-   +------------------------------+-------+
-   | wsrep_causal_reads           | OFF   |
-   +------------------------------+-------+
-   | wsrep_certify_nonPK          | ON    |
-   +------------------------------+-------+
-   | ...                          | ...   |
-   +------------------------------+-------+
-   | wsrep_sync_wait              | 0     |
-   +------------------------------+-------+
-
-For documentation of these parameters, the wsrep provider options, and the
-status variables available in Galera Cluster, see the `Reference`_.
diff --git a/doc/ha-guide/source/controller-ha-galera-install.rst b/doc/ha-guide/source/controller-ha-galera-install.rst
deleted file mode 100644
index 57e318bd..00000000
--- a/doc/ha-guide/source/controller-ha-galera-install.rst
+++ /dev/null
@@ -1,275 +0,0 @@
-Installation
-============
-
-Using Galera Cluster requires that you install two packages. The first is
-the database server, which must include the wsrep API patch. The second
-package is the Galera Replication Plugin, which enables write-set
-replication functionality in the database server.
-
-There are three implementations of Galera Cluster: MySQL, MariaDB, and
-Percona XtraDB. For each implementation, there is a software repository that
-provides binary packages for Debian, Red Hat, and SUSE-based Linux
-distributions.
-
-Enabling the repository
-~~~~~~~~~~~~~~~~~~~~~~~
-
-Galera Cluster is not available in the base repositories of Linux
-distributions. In order to install it with your package manager, you must
-first enable the repository on your system. The particular method for
-doing so varies depending on which distribution you use for OpenStack and
-which database server you want to use.
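-The repository entries in the sections below use placeholders such as
-``DISTRO``, ``RELEASE``, and ``VERSION``. If you are unsure of the correct
-values for your system, one way to check them, assuming your distribution
-ships the standard ``/etc/os-release`` file:
-
-.. code-block:: console
-
-   $ cat /etc/os-release
-
-The ``ID`` field gives the distribution name, and ``VERSION_ID`` (or the
-release code-name, where one is shown) maps onto the release placeholder.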
-Debian
-------
-
-For Debian and Debian-based distributions, such as Ubuntu, complete the
-following steps:
-
-#. Add the GnuPG key for the database repository that you want to use.
-
-   .. code-block:: console
-
-      # apt-key adv --recv-keys --keyserver \
-        keyserver.ubuntu.com BC19DDBA
-
-   Note that the particular key value in this command varies depending on
-   which database software repository you want to use:
-
-   +--------------------------+------------------------+
-   | Database                 | Key                    |
-   +==========================+========================+
-   | Galera Cluster for MySQL | ``BC19DDBA``           |
-   +--------------------------+------------------------+
-   | MariaDB Galera Cluster   | ``0xcbcb082a1bb943db`` |
-   +--------------------------+------------------------+
-   | Percona XtraDB Cluster   | ``1C4CBDCDCD2EFD2A``   |
-   +--------------------------+------------------------+
-
-#. Add the repository to your sources list. Using your preferred text
-   editor, create a ``galera.list`` file in the ``/etc/apt/sources.list.d/``
-   directory. For the contents of this file, use the lines that pertain to
-   the software repository you want to install:
-
-   .. code-block:: linux-config
-
-      # Galera Cluster for MySQL
-      deb http://releases.galeracluster.com/DISTRO RELEASE main
-
-      # MariaDB Galera Cluster
-      deb http://mirror.jmu.edu/pub/mariadb/repo/VERSION/DISTRO RELEASE main
-
-      # Percona XtraDB Cluster
-      deb http://repo.percona.com/apt RELEASE main
-
-   For each entry: Replace all instances of ``DISTRO`` with the distribution
-   that you use, such as ``debian`` or ``ubuntu``. Replace all instances of
-   ``RELEASE`` with the release of that distribution, such as ``wheezy`` or
-   ``trusty``. Replace all instances of ``VERSION`` with the version of the
-   database server that you want to install, such as ``5.6`` or ``10.0``.
-
-   .. note:: In the event that you do not know the release code-name for
-      your distribution, you can use the following command to
-      find it out:
-
-      .. code-block:: console
-
-         $ lsb_release -a
-
-#. Update the local cache:
-
-   .. code-block:: console
-
-      # apt-get update
-
-Packages in the Galera Cluster Debian repository are now available for
-installation on your system.
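-To confirm that ``apt`` can now see the new repository, you can query one
-of the package names used later in this guide; this is an optional,
-illustrative check:
-
-.. code-block:: console
-
-   $ apt-cache policy mysql-wsrep-5.6
-
-If the repository was added correctly, the output lists a candidate
-version from the Galera Cluster repository instead of ``(none)``.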
-
-
-SUSE
-----
-
-For SUSE Enterprise Linux and SUSE-based distributions, such as openSUSE,
-binary installations are only available for Galera Cluster for MySQL and
-MariaDB Galera Cluster.
-
-#. Create a ``Galera.repo`` file in the local directory. For Galera Cluster
-   for MySQL, use the following content:
-
-   .. code-block:: linux-config
-
-      [galera]
-      name = Galera Cluster for MySQL
-      baseurl = http://releases.galeracluster.com/DISTRO/RELEASE
-      gpgkey = http://releases.galeracluster.com/GPG-KEY-galeracluster.com
-      gpgcheck = 1
-
-   In the text: Replace ``DISTRO`` with the name of the distribution you
-   use, such as ``sles`` or ``opensuse``. Replace ``RELEASE`` with the
-   version number of that distribution.
-
-   For MariaDB Galera Cluster, instead use this content:
-
-   .. code-block:: linux-config
-
-      [mariadb]
-      name = MariaDB Galera Cluster
-      baseurl = http://yum.mariadb.org/VERSION/PACKAGE
-      gpgkey = https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
-      gpgcheck = 1
-
-   In the text: Replace ``VERSION`` with the version of MariaDB you want to
-   install, such as ``5.6`` or ``10.0``. Replace ``PACKAGE`` with the
-   package architecture you want to use, such as ``opensuse13-amd64``.
-
-#. Add the repository to your system:
-
-   .. code-block:: console
-
-      $ sudo zypper addrepo Galera.repo
-
-#. Refresh ``zypper``:
-
-   .. code-block:: console
-
-      $ sudo zypper refresh
-
-Packages in the Galera Cluster SUSE repository are now available for
-installation.
-
-
-Installing Galera Cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When you finish enabling the software repository for Galera Cluster, you can
-install it using your package manager. The particular command and packages
-you need to install vary depending on which database server you want to
-install and which Linux distribution you use:
-
-Galera Cluster for MySQL:
-
-
-- For Debian and Debian-based distributions, such as Ubuntu, run the
-  following command:
-
-  .. code-block:: console
-
-     # apt-get install galera-3 mysql-wsrep-5.6
-
-- For Red Hat Enterprise Linux and Red Hat-based distributions, such as
-  Fedora or CentOS, instead run this command:
-
-  .. code-block:: console
-
-     # yum install galera-3 mysql-wsrep-5.6
-
-- For SUSE Enterprise Linux Server and SUSE-based distributions, such as
-  openSUSE, instead run this command:
-
-  .. code-block:: console
-
-     # zypper install galera-3 mysql-wsrep-5.6
-
-
-MariaDB Galera Cluster:
-
-- For Debian and Debian-based distributions, such as Ubuntu, run the
-  following command:
-
-  .. code-block:: console
-
-     # apt-get install galera mariadb-galera-server
-
-- For Red Hat Enterprise Linux and Red Hat-based distributions, such as
-  Fedora or CentOS, instead run this command:
-
-  .. code-block:: console
-
-     # yum install galera MariaDB-Galera-server
-
-- For SUSE Enterprise Linux Server and SUSE-based distributions, such as
-  openSUSE, instead run this command:
-
-  .. code-block:: console
-
-     # zypper install galera MariaDB-Galera-server
-
-
-Percona XtraDB Cluster:
-
-
-- For Debian and Debian-based distributions, such as Ubuntu, run the
-  following command:
-
-  .. code-block:: console
-
-     # apt-get install percona-xtradb-cluster
-
-- For Red Hat Enterprise Linux and Red Hat-based distributions, such as
-  Fedora or CentOS, instead run this command:
-
-  .. code-block:: console
-
-     # yum install Percona-XtraDB-Cluster
-
-Galera Cluster is now installed on your system. You must repeat this
-process for each controller node in your cluster.
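-
-Before moving on to configuration, you can confirm that the packages
-actually landed. A quick sanity check on an RPM-based system (use
-``dpkg -l | grep -i galera`` instead on Debian-based systems; exact
-package names vary by implementation):
-
-.. code-block:: console
-
-   # rpm -qa | grep -i galera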
-
-.. warning:: In the event that you already installed the standalone version
-   of MySQL, MariaDB, or Percona XtraDB, this installation purges all
-   privileges on your OpenStack database server. You must reapply the
-   privileges listed in the installation guide.
diff --git a/doc/ha-guide/source/controller-ha-galera-manage.rst b/doc/ha-guide/source/controller-ha-galera-manage.rst
deleted file mode 100644
index 705ab6ac..00000000
--- a/doc/ha-guide/source/controller-ha-galera-manage.rst
+++ /dev/null
@@ -1,256 +0,0 @@
-Management
-===========
-
-When you finish the installation and configuration process on each
-cluster node in your OpenStack database, you can initialize Galera Cluster.
-
-Before you attempt this, verify that you have the following ready:
-
-- Database hosts with Galera Cluster installed. You need a
-  minimum of three hosts;
-- No firewalls between the hosts;
-- SELinux and AppArmor set to permit access to ``mysqld``;
-- The correct path to ``libgalera_smm.so`` given to the
-  ``wsrep_provider`` parameter.
-
-Initializing the cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In Galera Cluster, the Primary Component is the cluster of database
-servers that replicate into each other. In the event that a
-cluster node loses connectivity with the Primary Component, it
-defaults to a non-operational state, to avoid creating or serving
-inconsistent data.
-
-By default, cluster nodes do not start as part of a Primary
-Component. Instead, they assume that one exists somewhere and
-attempt to establish a connection with it. To create a Primary
-Component, you must start one cluster node using the
-``--wsrep-new-cluster`` option. You can do this using any cluster
-node; it is not important which one you choose. In the Primary
-Component, replication and state transfers bring all databases to
-the same state.
-
-To start the cluster, complete the following steps:
-
-#. Initialize the Primary Component on one cluster node. For
-   servers that use ``init``, run the following command:
-
-   .. code-block:: console
-
-      # service mysql start --wsrep-new-cluster
-
-   For servers that use ``systemd``, instead run this command:
-
-   .. code-block:: console
-
-      # systemctl start mysql --wsrep-new-cluster
-
-#. Once the database server starts, check the cluster status using
-   the ``wsrep_cluster_size`` status variable. From the database
-   client, run the following command:
-
-   .. code-block:: mysql
-
-      SHOW STATUS LIKE 'wsrep_cluster_size';
-
-      +--------------------+-------+
-      | Variable_name      | Value |
-      +--------------------+-------+
-      | wsrep_cluster_size | 1     |
-      +--------------------+-------+
-
-#. Start the database server on all other cluster nodes. For
-   servers that use ``init``, run the following command:
-
-   .. code-block:: console
-
-      # service mysql start
-
-   For servers that use ``systemd``, instead run this command:
-
-   .. code-block:: console
-
-      # systemctl start mysql
-
-#. When you have all cluster nodes started, log into the database
-   client on one of them and check the ``wsrep_cluster_size``
-   status variable again.
-
-   .. code-block:: mysql
-
-      SHOW STATUS LIKE 'wsrep_cluster_size';
-
-      +--------------------+-------+
-      | Variable_name      | Value |
-      +--------------------+-------+
-      | wsrep_cluster_size | 3     |
-      +--------------------+-------+
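-
-Besides the cluster size, you can confirm that a node has finished
-syncing. From the database client, check the
-``wsrep_local_state_comment`` status variable; a healthy, fully
-caught-up node reports ``Synced``:
-
-.. code-block:: mysql
-
-   SHOW STATUS LIKE 'wsrep_local_state_comment';
-
-   +---------------------------+--------+
-   | Variable_name             | Value  |
-   +---------------------------+--------+
-   | wsrep_local_state_comment | Synced |
-   +---------------------------+--------+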
-
-When each cluster node starts, it checks the IP addresses given to
-the ``wsrep_cluster_address`` parameter and attempts to establish
-network connectivity with a database server running there. Once it
-establishes a connection, it attempts to join the Primary
-Component, requesting a state transfer as needed to bring itself
-into sync with the cluster.
-
-In the event that you need to restart any cluster node, you can do
-so. When the database server comes back up, it establishes
-connectivity with the Primary Component and updates itself to any
-changes it may have missed while down.
-
-
-Restarting the cluster
------------------------
-
-Individual cluster nodes can be stopped and restarted without issue.
-When a database loses its connection or restarts, Galera Cluster
-brings it back into sync once it reestablishes connection with the
-Primary Component. In the event that you need to restart the
-entire cluster, identify the most advanced cluster node and
-initialize the Primary Component on that node.
-
-To find the most advanced cluster node, you need to check the
-sequence numbers, or seqnos, on the last committed transaction for
-each node. You can find this by viewing the ``grastate.dat`` file in
-the database directory:
-
-.. code-block:: console
-
-   $ cat /path/to/datadir/grastate.dat
-
-   # Galera saved state
-   version: 3.8
-   uuid: 5ee99582-bb8d-11e2-b8e3-23de375c1d30
-   seqno: 8204503945773
-
-Alternatively, if the database server is running, use the
-``wsrep_last_committed`` status variable:
-
-.. code-block:: mysql
-
-   SHOW STATUS LIKE 'wsrep_last_committed';
-
-   +----------------------+--------+
-   | Variable_name        | Value  |
-   +----------------------+--------+
-   | wsrep_last_committed | 409745 |
-   +----------------------+--------+
-
-This value increments with each transaction, so the most advanced
-node has the highest sequence number and is therefore the most up to date.
-
-
-Configuration tips
-~~~~~~~~~~~~~~~~~~~
-
-
-Deployment strategies
-----------------------
-
-Galera can be configured using one of the following
-strategies:
-
-- Each instance has its own IP address;
-
-  OpenStack services are configured with the list of these IP
-  addresses so they can select one of the addresses from those
-  available.
-
-- Galera runs behind HAProxy.
-
-  HAProxy load balances incoming requests and exposes just one IP
-  address for all the clients.
-
-  Galera synchronous replication guarantees a zero slave lag. The
-  failover procedure completes once HAProxy detects that the active
-  back end has gone down and switches to the backup one, which is
-  then marked as 'UP'. If no back ends are up (in other words, the
-  Galera cluster is not ready to accept connections), the failover
-  procedure finishes only when the Galera cluster has been
-  successfully reassembled. The SLA is normally no more than 5
-  minutes.
-
-- Use MySQL/Galera in active/passive mode to avoid deadlocks on
-  ``SELECT ... FOR UPDATE`` type queries (used, for example, by nova
-  and neutron). This issue is discussed more in the following:
-
-  - http://lists.openstack.org/pipermail/openstack-dev/2014-May/035264.html
-  - http://www.joinfu.com/
-
-Of these options, the second one is highly recommended. Although Galera
-supports active/active configurations, we recommend active/passive
-(enforced by the load balancer) in order to avoid lock contention.
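-
-In practice, the recommended strategy means that each OpenStack service
-points its database connection at the single address exposed by HAProxy
-rather than at an individual Galera node. A minimal sketch, assuming the
-``10.0.0.11`` VIP used elsewhere in this guide and a placeholder
-``NOVA_DBPASS`` password:
-
-.. code-block:: ini
-
-   [database]
-   # All services use the same VIP; HAProxy routes each connection
-   # to the one active Galera back end.
-   connection = mysql+pymysql://nova:NOVA_DBPASS@10.0.0.11/nova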
-
-
-Configuring HAProxy
---------------------
-
-If you use HAProxy for load-balancing client access to Galera
-Cluster, as described in :doc:`controller-ha-haproxy`, you can
-use the ``clustercheck`` utility to improve health checks.
-
-#. Create a configuration file for ``clustercheck`` at
-   ``/etc/sysconfig/clustercheck``:
-
-   .. code-block:: ini
-
-      MYSQL_USERNAME="clustercheck_user"
-      MYSQL_PASSWORD="my_clustercheck_password"
-      MYSQL_HOST="localhost"
-      MYSQL_PORT="3306"
-
-#. Log in to the database client and grant the ``clustercheck`` user
-   ``PROCESS`` privileges.
-
-   .. code-block:: mysql
-
-      GRANT PROCESS ON *.* TO 'clustercheck_user'@'localhost'
-      IDENTIFIED BY 'my_clustercheck_password';
-
-      FLUSH PRIVILEGES;
-
-   You only need to do this on one cluster node. Galera Cluster
-   replicates the user to all the others.
-
-#. Create a configuration file for the HAProxy monitor service, at
-   ``/etc/xinetd.d/galera-monitor``:
-
-   .. code-block:: ini
-
-      service galera-monitor
-      {
-         port = 9200
-         disable = no
-         socket_type = stream
-         protocol = tcp
-         wait = no
-         user = root
-         group = root
-         groups = yes
-         server = /usr/bin/clustercheck
-         type = UNLISTED
-         per_source = UNLIMITED
-         log_on_success =
-         log_on_failure = HOST
-         flags = REUSE
-      }
-
-#. Start the ``xinetd`` daemon for ``clustercheck``. For servers
-   that use ``init``, run the following commands:
-
-   .. code-block:: console
-
-      # chkconfig xinetd on
-      # service xinetd start
-
-   For servers that use ``systemd``, instead run these commands:
-
-   .. code-block:: console
-
-      # systemctl daemon-reload
-      # systemctl enable xinetd
-      # systemctl start xinetd
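-
-Before wiring the check into HAProxy, you can confirm that the monitor
-responds. A quick manual test (the hostname is an example;
-``clustercheck`` answers with an HTTP 200 status when the local node is
-synced and a 503 status when it is not):
-
-.. code-block:: console
-
-   $ curl -i http://controller1:9200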
diff --git a/doc/ha-guide/source/controller-ha-galera.rst b/doc/ha-guide/source/controller-ha-galera.rst
deleted file mode 100644
index e294839c..00000000
--- a/doc/ha-guide/source/controller-ha-galera.rst
+++ /dev/null
@@ -1,33 +0,0 @@
-Database (Galera Cluster)
-==========================
-
-The first step is to install the database that sits at the heart of the
-cluster. To implement high availability, run an instance of the database on
-each controller node and use Galera Cluster to provide replication between
-them. Galera Cluster is a synchronous multi-master database cluster, based
-on MySQL and the InnoDB storage engine. It is a high-availability service
-that provides high system uptime, no data loss, and scalability for growth.
-
-You can achieve high availability for the OpenStack database in many
-different ways, depending on the type of database that you want to use.
-There are three implementations of Galera Cluster available to you:
-
-- `Galera Cluster for MySQL `_ The MySQL
-  reference implementation from Codership Oy;
-- `MariaDB Galera Cluster `_ The MariaDB
-  implementation of Galera Cluster, which is commonly supported in
-  environments based on Red Hat distributions;
-- `Percona XtraDB Cluster `_ The XtraDB
-  implementation of Galera Cluster from Percona.
-
-In addition to Galera Cluster, you can also achieve high availability
-through other database options, such as PostgreSQL, which has its own
-replication system.
-
-
-.. toctree::
-   :maxdepth: 2
-
-   controller-ha-galera-install
-   controller-ha-galera-config
-   controller-ha-galera-manage
diff --git a/doc/ha-guide/source/controller-ha-haproxy.rst b/doc/ha-guide/source/controller-ha-haproxy.rst
deleted file mode 100644
index 8cc34602..00000000
--- a/doc/ha-guide/source/controller-ha-haproxy.rst
+++ /dev/null
@@ -1,229 +0,0 @@
-=======
-HAProxy
-=======
-
-HAProxy provides a fast and reliable HTTP reverse proxy and load balancer
-for TCP or HTTP applications. It is particularly suited for web sites
-under very high load that require persistence or Layer 7 processing.
-It realistically supports tens of thousands of connections with recent
-hardware.
-
-Each instance of HAProxy configures its front end to accept connections
-only from the virtual IP (VIP) address and distributes them across a
-back-end list of all instances of the corresponding service,
-such as any OpenStack API service.
-
-This makes the instances of HAProxy act independently and fail over
-transparently together with the network endpoint (VIP address)
-failover; they therefore share the same SLA.
-
-You can alternatively use a commercial load balancer, which is available
-as hardware or software. A hardware load balancer generally has good
-performance.
-
-For detailed instructions about installing HAProxy on your nodes,
-see its `official documentation `_.
-
-.. note::
-
-   HAProxy should not be a single point of failure.
-   It is advisable to have multiple HAProxy instances running,
-   where the number of these instances is a small odd number like 3 or 5.
-   You need to ensure its availability by other means,
-   such as Keepalived or Pacemaker.
-
-The common practice is to locate an HAProxy instance on each OpenStack
-controller in the environment.
-
-Once configured (see example file below), add HAProxy to the cluster
-and ensure the VIPs can only run on machines where HAProxy is active:
-
-``pcs``
-
-.. code-block:: console
-
-   $ pcs resource create lb-haproxy systemd:haproxy --clone
-   $ pcs constraint order start p_api-ip then lb-haproxy-clone kind=Optional
-   $ pcs constraint colocation add p_api-ip with lb-haproxy-clone
-
-``crmsh``
-
-TBA
-
-Example Config File
-~~~~~~~~~~~~~~~~~~~
-
-Here is an example ``/etc/haproxy/haproxy.cfg`` configuration file.
-You need a copy of it on each controller node.
-
-.. note::
-
-   To implement any changes made to this file, you must restart the
-   HAProxy service.
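-
-On most current distributions, the restart is a single command (assuming
-HAProxy runs under ``systemd``):
-
-.. code-block:: console
-
-   # systemctl restart haproxy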
-
-.. code-block:: none
-
-   global
-     chroot /var/lib/haproxy
-     daemon
-     group haproxy
-     maxconn 4000
-     pidfile /var/run/haproxy.pid
-     user haproxy
-
-   defaults
-     log global
-     maxconn 4000
-     option redispatch
-     retries 3
-     timeout http-request 10s
-     timeout queue 1m
-     timeout connect 10s
-     timeout client 1m
-     timeout server 1m
-     timeout check 10s
-
-   listen dashboard_cluster
-     bind :443
-     balance source
-     option tcpka
-     option httpchk
-     option tcplog
-     server controller1 10.0.0.12:443 check inter 2000 rise 2 fall 5
-     server controller2 10.0.0.13:443 check inter 2000 rise 2 fall 5
-     server controller3 10.0.0.14:443 check inter 2000 rise 2 fall 5
-
-   listen galera_cluster
-     bind :3306
-     balance source
-     option mysql-check
-     server controller1 10.0.0.12:3306 check port 9200 inter 2000 rise 2 fall 5
-     server controller2 10.0.0.13:3306 backup check port 9200 inter 2000 rise 2 fall 5
-     server controller3 10.0.0.14:3306 backup check port 9200 inter 2000 rise 2 fall 5
-
-   listen glance_api_cluster
-     bind :9292
-     balance source
-     option tcpka
-     option httpchk
-     option tcplog
-     server controller1 10.0.0.12:9292 check inter 2000 rise 2 fall 5
-     server controller2 10.0.0.13:9292 check inter 2000 rise 2 fall 5
-     server controller3 10.0.0.14:9292 check inter 2000 rise 2 fall 5
-
-   listen glance_registry_cluster
-     bind :9191
-     balance source
-     option tcpka
-     option tcplog
-     server controller1 10.0.0.12:9191 check inter 2000 rise 2 fall 5
-     server controller2 10.0.0.13:9191 check inter 2000 rise 2 fall 5
-     server controller3 10.0.0.14:9191 check inter 2000 rise 2 fall 5
-
-   listen keystone_admin_cluster
-     bind :35357
-     balance source
-     option tcpka
-     option httpchk
-     option tcplog
-     server controller1 10.0.0.12:35357 check inter 2000 rise 2 fall 5
-     server controller2 10.0.0.13:35357 check inter 2000 rise 2 fall 5
-     server controller3 10.0.0.14:35357 check inter 2000 rise 2 fall 5
-
-   listen keystone_public_internal_cluster
-     bind :5000
-     balance source
-     option tcpka
-     option httpchk
-     option tcplog
-     server controller1 10.0.0.12:5000 check inter 2000 rise 2 fall 5
-     server controller2 10.0.0.13:5000 check inter 2000 rise 2 fall 5
-     server controller3 10.0.0.14:5000 check inter 2000 rise 2 fall 5
-
-   listen nova_ec2_api_cluster
-     bind :8773
-     balance source
-     option tcpka
-     option tcplog
-     server controller1 10.0.0.12:8773 check inter 2000 rise 2 fall 5
-     server controller2 10.0.0.13:8773 check inter 2000 rise 2 fall 5
-     server controller3 10.0.0.14:8773 check inter 2000 rise 2 fall 5
-
-   listen nova_compute_api_cluster
-     bind :8774
-     balance source
-     option tcpka
-     option httpchk
-     option tcplog
-     server controller1 10.0.0.12:8774 check inter 2000 rise 2 fall 5
-     server controller2 10.0.0.13:8774 check inter 2000 rise 2 fall 5
-     server controller3 10.0.0.14:8774 check inter 2000 rise 2 fall 5
-
-   listen nova_metadata_api_cluster
-     bind :8775
-     balance source
-     option tcpka
-     option tcplog
-     server controller1 10.0.0.12:8775 check inter 2000 rise 2 fall 5
-     server controller2 10.0.0.13:8775 check inter 2000 rise 2 fall 5
-     server controller3 10.0.0.14:8775 check inter 2000 rise 2 fall 5
-
-   listen cinder_api_cluster
-     bind :8776
-     balance source
-     option tcpka
-     option httpchk
-     option tcplog
-     server controller1 10.0.0.12:8776 check inter 2000 rise 2 fall 5
-     server controller2 10.0.0.13:8776 check inter 2000 rise 2 fall 5
-     server controller3 10.0.0.14:8776 check inter 2000 rise 2 fall 5
-
-   listen ceilometer_api_cluster
-     bind :8777
-     balance source
-     option tcpka
-     option tcplog
-     server controller1 10.0.0.12:8777 check inter 2000 rise 2 fall 5
-     server controller2 10.0.0.13:8777 check inter 2000 rise 2 fall 5
-     server controller3 10.0.0.14:8777 check inter 2000 rise 2 fall 5
-
-   listen nova_vncproxy_cluster
-     bind :6080
-     balance source
-     option tcpka
-     option tcplog
-     server controller1 10.0.0.12:6080 check inter 2000 rise 2 fall 5
-     server controller2 10.0.0.13:6080 check inter 2000 rise 2 fall 5
-     server controller3 10.0.0.14:6080 check inter 2000 rise 2 fall 5
-
-   listen neutron_api_cluster
-     bind :9696
-     balance source
-     option tcpka
-     option httpchk
-     option tcplog
-     server controller1 10.0.0.12:9696 check inter 2000 rise 2 fall 5
-     server controller2 10.0.0.13:9696 check inter 2000 rise 2 fall 5
-     server controller3 10.0.0.14:9696 check inter 2000 rise 2 fall 5
-
-   listen swift_proxy_cluster
-     bind :8080
-     balance source
-     option tcplog
-     option tcpka
-     server controller1 10.0.0.12:8080 check inter 2000 rise 2 fall 5
-     server controller2 10.0.0.13:8080 check inter 2000 rise 2 fall 5
-     server controller3 10.0.0.14:8080 check inter 2000 rise 2 fall 5
-
-.. note::
-
-   The Galera cluster configuration directive ``backup`` indicates
-   that two of the three controllers are standby nodes.
-   This ensures that only one node services write requests
-   because OpenStack support for multi-node writes is not yet production-ready.
-
-.. note::
-
-   The Telemetry API service configuration does not have the ``option httpchk``
-   directive as it cannot process this check properly.
-   TODO: explain why the Telemetry API is so special
-
-[TODO: we need more commentary about the contents and format of this file]
diff --git a/doc/ha-guide/source/controller-ha-keystone.rst b/doc/ha-guide/source/controller-ha-keystone.rst
deleted file mode 100644
index 1abf1ea1..00000000
--- a/doc/ha-guide/source/controller-ha-keystone.rst
+++ /dev/null
@@ -1,147 +0,0 @@
-
-============================
-Identity services (keystone)
-============================
-
-OpenStack Identity (keystone)
-is the Identity service in OpenStack that is used by many other services.
-You should be familiar with
-`OpenStack identity concepts
-`_
-before proceeding.
-
-Making the OpenStack Identity service highly available
-in active/passive mode involves:
-
-- :ref:`keystone-pacemaker`
-- :ref:`keystone-config-identity`
-- :ref:`keystone-services-config`
-
-.. _keystone-pacemaker:
-
-Add OpenStack Identity resource to Pacemaker
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-#. You must first download the OpenStack Identity resource agent for
-   Pacemaker by running the following commands:
-
-   .. code-block:: console
-
-      # cd /usr/lib/ocf/resource.d
-      # mkdir openstack
-      # cd openstack
-      # wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/keystone
-      # chmod a+rx *
-
-#. You can now add the Pacemaker configuration
-   for the OpenStack Identity resource
-   by running the :command:`crm configure` command
-   to connect to the Pacemaker cluster.
-   Add the following cluster resources:
-
-   ::
-
-      primitive p_keystone ocf:openstack:keystone \
-        params config="/etc/keystone/keystone.conf" \
-        os_password="secretsecret" \
-        os_username="admin" \
-        os_tenant_name="admin" \
-        os_auth_url="http://10.0.0.11:5000/v2.0/" \
-        op monitor interval="30s" timeout="30s"
-
-   This configuration creates ``p_keystone``,
-   a resource for managing the OpenStack Identity service.
-
-   :command:`crm configure` supports batch input,
-   so you may copy and paste the above lines
-   into your live Pacemaker configuration,
-   and then make changes as required.
-   For example, you may enter ``edit p_ip_keystone``
-   from the :command:`crm configure` menu
-   and edit the resource to match your preferred virtual IP address.
-
-#. After you add these resources,
-   commit your configuration changes by entering :command:`commit`
-   from the :command:`crm configure` menu.
-   Pacemaker then starts the OpenStack Identity service
-   and its dependent resources on one of your nodes.
-
-.. _keystone-config-identity:
-
-Configure OpenStack Identity service
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-#. Edit the :file:`keystone.conf` file
-   to change the values of the :manpage:`bind(2)` parameters:
-
-   .. code-block:: ini
-
-      bind_host = 10.0.0.11
-      public_bind_host = 10.0.0.11
-      admin_bind_host = 10.0.0.11
-
-   The ``admin_bind_host`` parameter
-   lets you use a private network for admin access.
-
-#. To be sure that all data is highly available,
-   ensure that everything is stored in the MySQL database
-   (which is also highly available):
-
-   .. code-block:: ini
-
-      [catalog]
-      driver = keystone.catalog.backends.sql.Catalog
-      ...
-      [identity]
-      driver = keystone.identity.backends.sql.Identity
-      ...
-
-
-.. _keystone-services-config:
-
-Configure OpenStack services to use the highly available OpenStack Identity
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Your OpenStack services must now point
-their OpenStack Identity configuration
-to the highly available virtual cluster IP address
-rather than point to the physical IP address
-of an OpenStack Identity server as you would do
-in a non-HA environment.
-
-#. For OpenStack Compute, for example,
-   if your OpenStack Identity service IP address is 10.0.0.11,
-   use the following configuration in your :file:`api-paste.ini` file:
-
-   .. code-block:: ini
-
-      auth_host = 10.0.0.11
-
-#. You also need to create the OpenStack Identity Endpoint
-   with this IP address.
-
-   .. note::
-
-      If you are using both private and public IP addresses,
-      you should create two virtual IP addresses
-      and define your endpoint like this:
-
-      .. code-block:: console
-
-         $ openstack endpoint create --region $KEYSTONE_REGION \
-           $service-type public http://PUBLIC_VIP:5000/v2.0
-         $ openstack endpoint create --region $KEYSTONE_REGION \
-           $service-type admin http://10.0.0.11:35357/v2.0
-         $ openstack endpoint create --region $KEYSTONE_REGION \
-           $service-type internal http://10.0.0.11:5000/v2.0
-
-
-#. If you are using the horizon dashboard,
-   edit the :file:`local_settings.py` file
-   to include the following:
-
-   .. code-block:: ini
-
-      OPENSTACK_HOST = 10.0.0.11
-
-
diff --git a/doc/ha-guide/source/controller-ha-memcached.rst b/doc/ha-guide/source/controller-ha-memcached.rst
deleted file mode 100644
index 4592ea12..00000000
--- a/doc/ha-guide/source/controller-ha-memcached.rst
+++ /dev/null
@@ -1,21 +0,0 @@
-===================
-Memcached
-===================
-
-Memcached is a general-purpose distributed memory caching system. It
-is used to speed up dynamic database-driven websites by caching data
-and objects in RAM to reduce the number of times an external data
-source must be read.
-
-Memcached is a memory cache daemon that can be used by most OpenStack
-services to store ephemeral data, such as tokens.
-
-Access to memcached is not handled by HAProxy because replicated
-access is currently only in an experimental state. Instead, OpenStack
-services must be supplied with the full list of hosts running
-memcached.
-
-The Memcached client implements hashing to balance objects among the
-instances. Failure of an instance only impacts a percentage of the
-objects, and the client automatically removes it from the list of
-instances. The SLA is several minutes.
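-
-In configuration terms, supplying the full list of hosts usually looks
-like the following sketch. The exact option name and section vary
-slightly between services and releases, and the hostnames here are
-examples:
-
-.. code-block:: ini
-
-   # List every memcached instance; the client hashes keys across them.
-   memcached_servers = controller1:11211,controller2:11211,controller3:11211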
diff --git a/doc/ha-guide/source/controller-ha-pacemaker.rst b/doc/ha-guide/source/controller-ha-pacemaker.rst
deleted file mode 100644
index 71a8477f..00000000
--- a/doc/ha-guide/source/controller-ha-pacemaker.rst
+++ /dev/null
@@ -1,633 +0,0 @@
-=======================
-Pacemaker cluster stack
-=======================
-
-The `Pacemaker `_ cluster stack is the state-of-the-art
-high availability and load balancing stack for the Linux platform.
-Pacemaker is useful to make OpenStack infrastructure highly available.
-Also, it is storage and application-agnostic, and in no way
-specific to OpenStack.
-
-Pacemaker relies on the
-`Corosync `_ messaging layer
-for reliable cluster communications.
-Corosync implements the Totem single-ring ordering and membership protocol.
-It also provides UDP and InfiniBand based messaging,
-quorum, and cluster membership to Pacemaker.
-
-Pacemaker does not inherently (need or want to) understand the
-applications it manages. Instead, it relies on resource agents (RAs),
-scripts that encapsulate the knowledge of how to start, stop, and
-check the health of each application managed by the cluster.
-
-These agents must conform to one of the `OCF `_,
-`SysV Init `_, Upstart, or Systemd standards.
-
-Pacemaker ships with a large set of OCF agents (such as those managing
-MySQL databases, virtual IP addresses, and RabbitMQ), but can also use
-any agents already installed on your system and can be extended with
-your own (see the
-`developer guide `_).
-
-The steps to implement the Pacemaker cluster stack are:
-
-- :ref:`pacemaker-install`
-- :ref:`pacemaker-corosync-setup`
-- :ref:`pacemaker-corosync-start`
-- :ref:`pacemaker-start`
-- :ref:`pacemaker-cluster-properties`
-
-.. _pacemaker-install:
-
-Install packages
-~~~~~~~~~~~~~~~~
-
-On any host that is meant to be part of a Pacemaker cluster,
-you must first establish cluster communications
-through the Corosync messaging layer.
-This involves installing the following packages
-(and their dependencies, which your package manager
-usually installs automatically):
-
-- pacemaker
-
-- pcs (CentOS or RHEL) or crmsh
-
-- corosync
-
-- fence-agents (CentOS or RHEL) or cluster-glue
-
-- resource-agents
-
-- libqb0
-
-.. _pacemaker-corosync-setup:

-Set up the cluster with `pcs`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-#. Make sure pcs is running and configured to start at boot time:
-
-   .. code-block:: console
-
-      $ systemctl enable pcsd
-      $ systemctl start pcsd
-
-#. Set a password for the ``hacluster`` user **on each host**.
-
-   Since the cluster is a single administrative domain, it is generally
-   accepted to use the same password on all nodes.
-
-   .. code-block:: console
-
-      $ echo my-secret-password-no-dont-use-this-one \
-        | passwd --stdin hacluster
-
-#. Use that password to authenticate to the nodes that will
-   make up the cluster. The :option:`-p` option is used to give
-   the password on the command line and makes it easier to script.
-
-   .. code-block:: console
-
-      $ pcs cluster auth controller1 controller2 controller3 \
-        -u hacluster -p my-secret-password-no-dont-use-this-one --force
-
-#. Create the cluster, giving it a name, and start it:
-
-   .. code-block:: console
-
-      $ pcs cluster setup --force --name my-first-openstack-cluster \
-        controller1 controller2 controller3
-      $ pcs cluster start --all
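-
-Before adding any resources, it is worth confirming that the cluster
-actually formed. A quick check (the output is version-dependent; the
-important part is that all three controllers report as online):
-
-.. code-block:: console
-
-   $ pcs status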
-
-.. note::
-
-   In Red Hat Enterprise Linux or CentOS environments, this is the
-   recommended path to perform configuration. For more information,
-   see the `RHEL docs
-   `_.
-
-Set up the cluster with `crmsh`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-After installing the Corosync package, you must create
-the :file:`/etc/corosync/corosync.conf` configuration file.
-
-.. note::
-
-   For Ubuntu, you should also enable the Corosync service
-   in the ``/etc/default/corosync`` configuration file.
-
-Corosync can be configured to work
-with either multicast or unicast IP addresses
-or to use the votequorum library.
-
-- :ref:`corosync-multicast`
-- :ref:`corosync-unicast`
-- :ref:`corosync-votequorum`
-
-.. _corosync-multicast:
-
-Set up Corosync with multicast
-------------------------------
-
-Most distributions ship an example configuration file
-(:file:`corosync.conf.example`)
-as part of the documentation bundled with the Corosync package.
-An example Corosync configuration file is shown below:
-
-**Example Corosync configuration file for multicast (corosync.conf)**
-
-.. code-block:: ini
-
-   totem {
-      version: 2
-
-      # Time (in ms) to wait for a token (1)
-      token: 10000
-
-      # How many token retransmits before forming a new
-      # configuration
-      token_retransmits_before_loss_const: 10
-
-      # Turn off the virtual synchrony filter
-      vsftype: none
-
-      # Enable encryption (2)
-      secauth: on
-
-      # How many threads to use for encryption/decryption
-      threads: 0
-
-      # This specifies the redundant ring protocol, which may be
-      # none, active, or passive. (3)
-      rrp_mode: active
-
-      # The following is a two-ring multicast configuration. (4)
-      interface {
-         ringnumber: 0
-         bindnetaddr: 10.0.0.0
-         mcastaddr: 239.255.42.1
-         mcastport: 5405
-      }
-      interface {
-         ringnumber: 1
-         bindnetaddr: 10.0.42.0
-         mcastaddr: 239.255.42.2
-         mcastport: 5405
-      }
-   }
-
-   amf {
-      mode: disabled
-   }
-
-   service {
-      # Load the Pacemaker Cluster Resource Manager (5)
-      ver: 1
-      name: pacemaker
-   }
-
-   aisexec {
-      user: root
-      group: root
-   }
-
-   logging {
-      fileline: off
-      to_stderr: yes
-      to_logfile: no
-      to_syslog: yes
-      syslog_facility: daemon
-      debug: off
-      timestamp: on
-      logger_subsys {
-         subsys: AMF
-         debug: off
-         tags: enter|leave|trace1|trace2|trace3|trace4|trace6
-      }
-   }
-
-Note the following:
-
-- The ``token`` value specifies the time, in milliseconds,
-  during which the Corosync token is expected
-  to be transmitted around the ring.
-  When this timeout expires, the token is declared lost,
-  and after ``token_retransmits_before_loss_const`` lost tokens,
-  the non-responding processor (cluster node) is declared dead.
-  In other words, ``token × token_retransmits_before_loss_const``
-  is the maximum time a node is allowed to not respond to cluster messages
-  before being considered dead.
-  The default for token is 1000 milliseconds (1 second),
-  with 4 allowed retransmits.
-  These defaults are intended to minimize failover times,
-  but can cause frequent "false alarms" and unintended failovers
-  in case of short network interruptions. The values used here are safer,
-  albeit with slightly extended failover times.
-
-- With ``secauth`` enabled,
-  Corosync nodes mutually authenticate using a 128-byte shared secret
-  stored in the :file:`/etc/corosync/authkey` file,
-  which may be generated with the :command:`corosync-keygen` utility.
-  When using ``secauth``, cluster communications are also encrypted.
-
-- In Corosync configurations using redundant networking
-  (with more than one interface),
-  you must select a Redundant Ring Protocol (RRP) mode other than ``none``.
-  ``active`` is the recommended RRP mode.
-
-  Note the following about the recommended interface configuration:
-
-  - Each configured interface must have a unique ``ringnumber``,
-    starting with 0.
-
-  - The ``bindnetaddr`` is the network address of the interfaces to bind to.
-    The example uses two network addresses of /24 IPv4 subnets.
-
-  - Multicast groups (``mcastaddr``) must not be reused
-    across cluster boundaries.
-    In other words, no two distinct clusters
-    should ever use the same multicast group.
-    Be sure to select multicast addresses compliant with
-    `RFC 2365, "Administratively Scoped IP Multicast"
-    `_.
-
-  - For firewall configurations,
-    note that Corosync communicates over UDP only,
-    and uses ``mcastport`` (for receives)
-    and ``mcastport - 1`` (for sends).
-
-- The service declaration for the pacemaker service
-  may be placed in the :file:`corosync.conf` file directly
-  or in its own separate file, :file:`/etc/corosync/service.d/pacemaker`.
-
-  .. note::
-
-     If you are using Corosync version 2 on Ubuntu 14.04,
-     remove or comment out lines under the service stanza,
-     which enables Pacemaker to start up. Another potential
-     problem is the boot and shutdown order of Corosync and
-     Pacemaker. To force Pacemaker to start after Corosync and
-     stop before Corosync, fix the start and kill symlinks manually:
-
-     .. code-block:: console
-
-        # update-rc.d pacemaker start 20 2 3 4 5 . stop 00 0 1 6 .
-
-     The Pacemaker service also requires an additional
-     configuration file ``/etc/corosync/uidgid.d/pacemaker``
-     to be created with the following content:
-
-     .. code-block:: ini
-
-        uidgid {
-           uid: hacluster
-           gid: haclient
-        }
-
-- Once created, the :file:`corosync.conf` file
-  (and the :file:`authkey` file if the secauth option is enabled)
-  must be synchronized across all cluster nodes.
-
-.. _corosync-unicast:
-
-Set up Corosync with unicast
-----------------------------
-
-For environments that do not support multicast,
-Corosync should be configured for unicast.
-An example fragment of the :file:`corosync.conf` file
-for unicast is shown below:
-
-**Corosync configuration file fragment for unicast (corosync.conf)**
-
-.. code-block:: ini
-
-   totem {
-      #...
-      interface {
-         ringnumber: 0
-         bindnetaddr: 10.0.0.0
-         broadcast: yes (1)
-         mcastport: 5405
-      }
-      interface {
-         ringnumber: 1
-         bindnetaddr: 10.0.42.0
-         broadcast: yes
-         mcastport: 5405
-      }
-      transport: udpu (2)
-   }
-
-   nodelist { (3)
-      node {
-         ring0_addr: 10.0.0.12
-         ring1_addr: 10.0.42.12
-         nodeid: 1
-      }
-      node {
-         ring0_addr: 10.0.0.13
-         ring1_addr: 10.0.42.13
-         nodeid: 2
-      }
-      node {
-         ring0_addr: 10.0.0.14
-         ring1_addr: 10.0.42.14
-         nodeid: 3
-      }
-   }
-   #...
-
-Note the following:
-
-- If the ``broadcast`` parameter is set to ``yes``,
-  the broadcast address is used for communication.
-  If this option is set, the ``mcastaddr`` parameter should not be set.
-
-- The ``transport`` directive controls the transport mechanism used.
-  To avoid the use of multicast entirely,
-  specify the ``udpu`` unicast transport parameter.
-  This requires specifying the list of members
-  in the ``nodelist`` directive,
-  which makes it possible to define the membership before deployment.
-  The default is ``udp``.
-  The transport type can also be set to ``udpu`` or ``iba``.
-
-- Within the ``nodelist`` directive,
-  it is possible to specify specific information
-  about the nodes in the cluster.
-  The directive can contain only the node sub-directive,
-  which specifies every node that should be a member of the membership,
-  and where non-default options are needed.
-  Every node must have at least the ``ring0_addr`` field filled.
-
-  .. note::
-
-     For UDPU, every node that should be a member
-     of the membership must be specified.
-
-  Possible options are:
-
-  - ``ring{X}_addr`` specifies the IP address of one of the nodes.
-    {X} is the ring number.
-
-  - ``nodeid`` is optional
-    when using IPv4 and required when using IPv6.
-    This is a 32-bit value specifying the node identifier
-    delivered to the cluster membership service.
-    If this is not specified with IPv4,
-    the node id is determined from the 32-bit IP address
-    of the system to which the system is bound with ring identifier of 0.
-    The node identifier value of zero is reserved and should not be used.
-
-
-.. _corosync-votequorum:
-
-Set up Corosync with votequorum library
----------------------------------------
-
-The votequorum library is part of the corosync project.
-It provides an interface to the vote-based quorum service
-and it must be explicitly enabled in the Corosync configuration file.
-The main role of the votequorum library is to avoid split-brain situations,
-but it also provides a mechanism to:
-
-- Query the quorum status
-
-- Get a list of nodes known to the quorum service
-
-- Receive notifications of quorum state changes
-
-- Change the number of votes assigned to a node
-
-- Change the number of expected votes for a cluster to be quorate
-
-- Connect an additional quorum device
-  to allow small clusters to remain quorate during node outages
-
-The votequorum library has been created to replace and eliminate
-qdisk, the disk-based quorum daemon for CMAN,
-from advanced cluster configurations.
-
-A sample votequorum service configuration
-in the :file:`corosync.conf` file is:
-
-.. code-block:: ini
-
-   quorum {
-      provider: corosync_votequorum (1)
-      expected_votes: 7 (2)
-      wait_for_all: 1 (3)
-      last_man_standing: 1 (4)
-      last_man_standing_window: 10000 (5)
-   }
-
-Note the following:
-
-- Specifying ``corosync_votequorum`` enables the votequorum library;
-  this is the only required option.
-
-- The cluster is fully operational with ``expected_votes`` set to 7 nodes
-  (each node has 1 vote), quorum: 4.
-  If a list of nodes is specified in the ``nodelist`` directive,
-  the ``expected_votes`` value is ignored.
-
-- Setting ``wait_for_all`` to 1 means that,
-  when starting up a cluster (all nodes down),
-  the cluster quorum is held until all nodes are online
-  and have joined the cluster for the first time.
-  This parameter is new in Corosync 2.0.
-
-- Setting ``last_man_standing`` to 1 enables
-  the Last Man Standing (LMS) feature;
-  by default, it is disabled (set to 0).
-  If a cluster is on the quorum edge
-  (``expected_votes:`` set to 7; ``online nodes:`` set to 4)
-  for longer than the time specified
-  for the ``last_man_standing_window`` parameter,
-  the cluster can recalculate quorum and continue operating
-  even if the next node is lost.
-  This logic is repeated until the number of online nodes
-  in the cluster reaches 2.
-  In order to allow the cluster to step down from 2 members to only 1,
-  the ``auto_tie_breaker`` parameter needs to be set;
-  this is not recommended for production environments.
-
-- ``last_man_standing_window`` specifies the time, in milliseconds,
-  required to recalculate quorum after one or more hosts
-  have been lost from the cluster.
-  To do the new quorum recalculation,
-  the cluster must have quorum for at least the interval
-  specified for ``last_man_standing_window``;
-  the default is 10000 ms.
-
-
-.. _pacemaker-corosync-start:
-
-Start Corosync
---------------
-
-``Corosync`` is started as a regular system service.
-Depending on your distribution, it may ship with an LSB init script,
-an upstart job, or a systemd unit file.
-Either way, the service is usually named ``corosync``:
-
-- To start ``corosync`` with the LSB init script:
-
-  .. code-block:: console
-
-     # /etc/init.d/corosync start
-
-- Alternatively:
-
-  .. code-block:: console
-
-     # service corosync start
-
-- To start ``corosync`` with upstart:
-
-  .. code-block:: console
-
-     # start corosync
-
-- To start ``corosync`` with the systemd unit file:
-
-  .. code-block:: console
-
-     # systemctl start corosync
-
-You can now check the ``corosync`` connectivity with one of these tools.
-
-Use the :command:`corosync-cfgtool` utility with the :option:`-s` option
-to get a summary of the health of the communication rings:
-
-.. code-block:: console
-
-   # corosync-cfgtool -s
-   Printing ring status.
-   Local node ID 435324542
-   RING ID 0
-           id      = 10.0.0.82
-           status  = ring 0 active with no faults
-   RING ID 1
-           id      = 10.0.42.100
-           status  = ring 1 active with no faults
-
-Use the :command:`corosync-objctl` utility
-to dump the Corosync cluster member list:
-
-.. code-block:: console
-
-   # corosync-objctl runtime.totem.pg.mrp.srp.members
-   runtime.totem.pg.mrp.srp.435324542.ip=r(0) ip(10.0.0.82) r(1) ip(10.0.42.100)
-   runtime.totem.pg.mrp.srp.435324542.join_count=1
-   runtime.totem.pg.mrp.srp.435324542.status=joined
-   runtime.totem.pg.mrp.srp.983895584.ip=r(0) ip(10.0.0.87) r(1) ip(10.0.42.254)
-   runtime.totem.pg.mrp.srp.983895584.join_count=1
-   runtime.totem.pg.mrp.srp.983895584.status=joined
-
-You should see a ``status=joined`` entry
-for each of your constituent cluster nodes.
-
-[TODO: Should the main example now use corosync-cmapctl and have the note
-give the command for Corosync version 1?]
-
-.. note::
-
-   If you are using Corosync version 2, use the :command:`corosync-cmapctl`
-   utility instead of :command:`corosync-objctl`; it is a direct replacement.
-
-.. _pacemaker-start:
-
-Start Pacemaker
----------------
-
-After the ``corosync`` service has been started
-and you have verified that the cluster is communicating properly,
-you can start :command:`pacemakerd`, the Pacemaker master control process.
-Choose one from the following four ways to start it:
-
-- To start ``pacemaker`` with the LSB init script:
-
-  .. code-block:: console
-
-     # /etc/init.d/pacemaker start
-
-- Alternatively:
-
-  .. code-block:: console
-
-     # service pacemaker start
-
-- To start ``pacemaker`` with upstart:
-
-  .. code-block:: console
-
-     # start pacemaker
-
-- To start ``pacemaker`` with the systemd unit file:
-
-  .. code-block:: console
-
-     # systemctl start pacemaker
-
-After the ``pacemaker`` service has started,
-Pacemaker creates a default empty cluster configuration with no resources.
-Use the :command:`crm_mon` utility to observe the status of ``pacemaker``:
-
-.. code-block:: console
-
-   ============
-   Last updated: Sun Oct 7 21:07:52 2012
-   Last change: Sun Oct 7 20:46:00 2012 via cibadmin on controller2
-   Stack: openais
-   Current DC: controller2 - partition with quorum
-   Version: 1.1.6-9971ebba4494012a93c03b40a2c58ec0eb60f50c
-   3 Nodes configured, 3 expected votes
-   0 Resources configured.
-   ============
-
-   Online: [ controller3 controller2 controller1 ]
-
-.. _pacemaker-cluster-properties:
-
-Set basic cluster properties
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-After you set up your Pacemaker cluster,
-you should set a few basic cluster properties:
-
-``crmsh``
-
-.. code-block:: console
-
-   $ crm configure property pe-warn-series-max="1000" \
-       pe-input-series-max="1000" \
-       pe-error-series-max="1000" \
-       cluster-recheck-interval="5min"
-
-``pcs``
-
-.. code-block:: console
-
-   $ pcs property set pe-warn-series-max=1000 \
-       pe-input-series-max=1000 \
-       pe-error-series-max=1000 \
-       cluster-recheck-interval=5min
-
-Note the following:
-
-- Setting the ``pe-warn-series-max``, ``pe-input-series-max``
-  and ``pe-error-series-max`` parameters to 1000
-  instructs Pacemaker to keep a longer history of the inputs processed
-  and errors and warnings generated by its Policy Engine.
-  This history is useful if you need to troubleshoot the cluster.
-
-- Pacemaker uses an event-driven approach to cluster state processing.
-  The ``cluster-recheck-interval`` parameter (which defaults to 15 minutes)
-  defines the interval at which certain Pacemaker actions occur.
-  It is usually prudent to reduce this to a shorter interval,
-  such as 5 or 3 minutes.
-
-After you make these changes, you may commit the updated configuration.
diff --git a/doc/ha-guide/source/controller-ha-rabbitmq.rst b/doc/ha-guide/source/controller-ha-rabbitmq.rst
deleted file mode 100644
index 02909b56..00000000
--- a/doc/ha-guide/source/controller-ha-rabbitmq.rst
+++ /dev/null
@@ -1,310 +0,0 @@
-========
-RabbitMQ
-========
-
-An AMQP (Advanced Message Queuing Protocol) compliant message bus is
-required for most OpenStack components in order to coordinate the
-execution of jobs entered into the system.
-
-The most popular AMQP implementation used in OpenStack installations
-is RabbitMQ.
-
-RabbitMQ nodes fail over on both the application and the
-infrastructure layers.
-
-The application layer is controlled by the ``oslo.messaging``
-configuration options for multiple AMQP hosts. If the AMQP node fails,
-the application reconnects to the next one configured within the
-specified reconnect interval. The specified reconnect interval
-constitutes its SLA.
-
-On the infrastructure layer, the SLA is the time it takes the RabbitMQ
-cluster to reassemble. Several cases are possible. The Mnesia keeper
-node is the master of the corresponding Pacemaker resource for
-RabbitMQ; when it fails, the result is a full AMQP cluster downtime
-interval. Normally, its SLA is no more than several minutes. Failure
-of another node that is a slave of the corresponding Pacemaker
-resource for RabbitMQ results in no AMQP cluster downtime at all.
-
-Making the RabbitMQ service highly available involves the following steps:
-
-- :ref:`Install RabbitMQ<rabbitmq-install>`
-
-- :ref:`Configure RabbitMQ for HA queues<rabbitmq-configure>`
-
-- :ref:`Configure OpenStack services to use Rabbit HA queues
-  <rabbitmq-services>`
-
-.. note::
-
-   Access to RabbitMQ is not normally handled by HAProxy. Instead,
-   consumers must be supplied with the full list of hosts running
-   RabbitMQ, using the ``rabbit_hosts`` option, and must turn on the
-   ``rabbit_ha_queues`` option.
-
-   Jon Eck found the `core issue
-   `_
-   and went into some detail regarding the `history and solution
-   `_
-   on his blog.
-
-   In summary though:
-
-   The source address for the connection from HAProxy back to the
-   client is the VIP address. However the VIP address is no longer
-   present on the host. This means that the network (IP) layer
-   deems the packet unroutable, and informs the transport (TCP)
-   layer. TCP, however, is a reliable transport. It knows how to
-   handle transient errors and will retry. And so it does.
-
-   In this case that is a problem though, because:
-
-   TCP generally holds on to hope for a long time. A ballpark
-   estimate is somewhere on the order of tens of minutes (30
-   minutes is commonly referenced). During this time it will keep
-   probing and trying to deliver the data.
-
-   It is important to note that HAProxy has no idea that any of this is
-   happening. As far as its process is concerned, it called
-   ``write()`` with the data and the kernel returned success. The
-   resolution is already understood and just needs to make its way
-   through a review.
-
-.. _rabbitmq-install:
-
-Install RabbitMQ
-~~~~~~~~~~~~~~~~
-
-The commands for installing RabbitMQ are specific to the Linux distribution
-you are using:
-
-.. list-table:: Install RabbitMQ
-   :widths: 15 30
-   :header-rows: 1
-
-   * - Distribution
-     - Command
-   * - Ubuntu, Debian
-     - :command:`# apt-get install rabbitmq-server`
-   * - RHEL, Fedora, CentOS
-     - :command:`# yum install rabbitmq-server`
-   * - openSUSE
-     - :command:`# zypper install rabbitmq-server`
-   * - SLES 12
-     - :command:`# zypper addrepo -f obs://Cloud:OpenStack:Kilo/SLE_12 Kilo`
-
-       [Verify fingerprint of imported GPG key; see below]
-
-       :command:`# zypper install rabbitmq-server`
-
-
-.. note::
-
-   For SLES 12, the packages are signed by GPG key 893A90DAD85F9316.
-   You should verify the fingerprint of the imported GPG key before using it.
-
-   ::
-
-      Key ID: 893A90DAD85F9316
-      Key Name: Cloud:OpenStack OBS Project
-      Key Fingerprint: 35B34E18ABC1076D66D5A86B893A90DAD85F9316
-      Key Created: Tue Oct 8 13:34:21 2013
-      Key Expires: Thu Dec 17 13:34:21 2015
-
-For more information,
-see the official installation manual for the distribution:
-
-- `Debian and Ubuntu `_
-- `RPM based `_
-  (RHEL, Fedora, CentOS, openSUSE)
-
-.. _rabbitmq-configure:
-
-Configure RabbitMQ for HA queues
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-[TODO: This section should begin with a brief mention
-about what HA queues are and why they are valuable, etc]
-
-We are building a cluster of RabbitMQ nodes to construct a RabbitMQ broker,
-which is a logical grouping of several Erlang nodes.
-
-The following components/services can work with HA queues:
-
-[TODO: replace "currently" with specific release names]
-
-[TODO: Does this list need to be updated? Perhaps we need a table
-that shows each component and the earliest release that allows it
-to work with HA queues.]
-
-- OpenStack Compute
-- OpenStack Block Storage
-- OpenStack Networking
-- Telemetry
-
-We have to consider that, while exchanges and bindings
-survive the loss of individual nodes,
-queues and their messages do not
-because a queue and its contents are located on one node.
-If we lose this node, we also lose the queue.
-
-Mirrored queues in RabbitMQ improve
-the availability of service since they are resilient to failures.
-
-Production servers should run (at least) three RabbitMQ servers;
-for testing and demonstration purposes,
-it is possible to run only two servers.
-In this section, we configure two nodes,
-called ``rabbit1`` and ``rabbit2``.
-To build a broker, we need to ensure
-that all nodes have the same Erlang cookie file.
-
-[TODO: Should the example instead use a minimum of three nodes?]
-
-#. To do so, stop RabbitMQ everywhere and copy the cookie
-   from the first node to each of the other node(s):
-
-   .. code-block:: console
-
-      # scp /var/lib/rabbitmq/.erlang.cookie root@NODE:/var/lib/rabbitmq/.erlang.cookie
-
-#. On each target node, verify the correct owner,
-   group, and permissions of the file :file:`.erlang.cookie`.
-
-   .. code-block:: console
-
-      # chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie
-      # chmod 400 /var/lib/rabbitmq/.erlang.cookie
-
-#. Start the message queue service on all nodes and configure it to start
-   when the system boots.
-
-   On Ubuntu, it is configured by default.
-
-   On CentOS, RHEL, openSUSE, and SLES:
-
-   .. code-block:: console
-
-      # systemctl enable rabbitmq-server.service
-      # systemctl start rabbitmq-server.service
-
-#. Verify that the nodes are running:
-
-   .. code-block:: console
-
-      # rabbitmqctl cluster_status
-      Cluster status of node rabbit@NODE...
-      [{nodes,[{disc,[rabbit@NODE]}]},
-       {running_nodes,[rabbit@NODE]},
-       {partitions,[]}]
-      ...done.
-
-#. Run the following commands on each node except the first one:
-
-   .. code-block:: console
-
-      # rabbitmqctl stop_app
-      Stopping node rabbit@NODE...
-      ...done.
-      # rabbitmqctl join_cluster --ram rabbit@rabbit1
-      # rabbitmqctl start_app
-      Starting node rabbit@NODE ...
-      ...done.
-
-.. note::
-
-   The default node type is a disc node. In this guide, nodes
-   join the cluster as RAM nodes.
-
-#. To verify the cluster status:
-
-   .. code-block:: console
-
-      # rabbitmqctl cluster_status
-      Cluster status of node rabbit@NODE...
-      [{nodes,[{disc,[rabbit@rabbit1]},{ram,[rabbit@NODE]}]}, \
-       {running_nodes,[rabbit@NODE,rabbit@rabbit1]}]
-
-   If the cluster is working,
-   you can create usernames and passwords for the queues.
-
-#. To ensure that all queues except those with auto-generated names
-   are mirrored across all running nodes,
-   set the ``ha-mode`` policy key to all
-   by running the following command on one of the nodes:
-
-   .. code-block:: console
-
-      # rabbitmqctl set_policy ha-all '^(?!amq\.).*' '{"ha-mode": "all"}'
-
-More information is available in the RabbitMQ documentation:
-
-- `Highly Available Queues `_
-- `Clustering Guide `_
-
-.. note::
-
-   As another option to make RabbitMQ highly available, RabbitMQ contains the
-   OCF scripts for the Pacemaker cluster resource agents since version 3.5.7.
-   It provides the active/active RabbitMQ cluster with mirrored queues.
-   For more information, see `Auto-configuration of a cluster with
-   a Pacemaker `_.
-
-.. _rabbitmq-services:
-
-Configure OpenStack services to use Rabbit HA queues
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-We have to configure the OpenStack components
-to use at least two RabbitMQ nodes.
-
-Do this configuration on all services using RabbitMQ:
-
-#. RabbitMQ HA cluster host:port pairs:
-
-   ::
-
-      rabbit_hosts=rabbit1:5672,rabbit2:5672,rabbit3:5672
-
-#. How frequently to retry connecting with RabbitMQ:
-   [TODO: document the unit of measure here? Seconds?]
-
-   ::
-
-      rabbit_retry_interval=1
-
-#. How long to back-off for between retries when connecting to RabbitMQ:
-   [TODO: document the unit of measure here? Seconds?]
-
-   ::
-
-      rabbit_retry_backoff=2
-
-#. Maximum number of retries when connecting to RabbitMQ
-   (infinite by default):
-
-   ::
-
-      rabbit_max_retries=0
-#. Use durable queues in RabbitMQ:
-
-   ::
-
-      rabbit_durable_queues=true
-
-#. Use HA queues in RabbitMQ (x-ha-policy: all):
-
-   ::
-
-      rabbit_ha_queues=true
-
-.. note::
-
-   If you change the configuration from an old set-up
-   that did not use HA queues, you should restart the service:
-
-   .. code-block:: console
-
-      # rabbitmqctl stop_app
-      # rabbitmqctl reset
-      # rabbitmqctl start_app
diff --git a/doc/ha-guide/source/controller-ha-telemetry.rst b/doc/ha-guide/source/controller-ha-telemetry.rst
deleted file mode 100644
index 51de7b25..00000000
--- a/doc/ha-guide/source/controller-ha-telemetry.rst
+++ /dev/null
@@ -1,78 +0,0 @@
-
-=========
-Telemetry
-=========
-
-[TODO (Add Telemetry overview)]
-
-Telemetry central agent
-~~~~~~~~~~~~~~~~~~~~~~~
-
-The Telemetry central agent can be configured to partition its polling
-workload between multiple agents, enabling high availability.
-
-Both the central and the compute agent can run in an HA deployment,
-which means that multiple instances of these services can run in
-parallel with workload partitioning among these running instances.
-
-The `Tooz `__ library provides
-the coordination within the groups of service instances.
-It provides an API above several back ends that can be used for building
-distributed applications.
-
-Tooz supports
-`various drivers `__
-including the following back end solutions:
-
-* `Zookeeper `__.
-  Recommended solution by the Tooz project.
-
-* `Redis `__.
-  Recommended solution by the Tooz project.
-
-* `Memcached `__.
-  Recommended for testing.
-
-You must configure a supported Tooz driver for the HA deployment of
-the Telemetry services.
-
-For information about the required configuration options that have
-to be set in the :file:`ceilometer.conf` configuration file for both
-the central and compute agents, see the `coordination section
-`__
-in the OpenStack Configuration Reference.
-
-.. note:: Without the ``backend_url`` option being set, only one
-   instance of both the central and compute agent service is able to run
-   and function correctly.
-
-The availability check of the instances is provided by heartbeat messages.
-When the connection with an instance is lost, the workload will be
-reassigned among the remaining instances in the next polling cycle.
-
-.. note:: Memcached uses a timeout value, which should always be set to
-   a value that is higher than the heartbeat value set for Telemetry.
-
-For backward compatibility and supporting existing deployments, the central
-agent configuration also supports using different configuration files for
-groups of service instances of this type that are running in parallel.
-To enable this configuration, set a value for the
-``partitioning_group_prefix`` option in the `central section
-`__
-in the OpenStack Configuration Reference.
-
-.. warning:: For each sub-group of the central agent pool with the same
-   ``partitioning_group_prefix`` a disjoint subset of meters must be polled --
-   otherwise samples may be missing or duplicated. The list of meters to poll
-   can be set in the :file:`/etc/ceilometer/pipeline.yaml` configuration file.
-   For more information about pipelines, see the `Data collection and
-   processing
-   `__
-   section.
-
-To enable the compute agent to run multiple instances simultaneously with
-workload partitioning, the ``workload_partitioning`` option has to be set to
-``True`` under the `compute section `__
-in the :file:`ceilometer.conf` configuration file.
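-
-As a concrete illustration, a minimal sketch of the relevant
-:file:`ceilometer.conf` settings, assuming the Redis driver and a
-hypothetical coordination host named ``controller``:
-
-.. code-block:: ini
-
-   [coordination]
-   # Tooz back end used to partition the polling workload
-   backend_url = redis://controller:6379
-
-   [compute]
-   # Let multiple compute agents run in parallel
-   workload_partitioning = True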
diff --git a/doc/ha-guide/source/controller-ha-vip.rst b/doc/ha-guide/source/controller-ha-vip.rst
deleted file mode 100644
index b46adc81..00000000
--- a/doc/ha-guide/source/controller-ha-vip.rst
+++ /dev/null
@@ -1,24 +0,0 @@
-
-=================
-Configure the VIP
-=================
-
-You must select and assign a virtual IP address (VIP)
-that can freely float between cluster nodes.
-
-This configuration creates ``vip``,
-a virtual IP address for use by the API node (``10.0.0.11``):
-
-For ``crmsh``:
-
-.. code-block:: console
-
-   primitive vip ocf:heartbeat:IPaddr2 \
-     params ip="10.0.0.11" cidr_netmask="24" op monitor interval="30s"
-
-For ``pcs``:
-
-.. code-block:: console
-
-   # pcs resource create vip ocf:heartbeat:IPaddr2 \
-     ip="10.0.0.11" cidr_netmask="24" op monitor interval="30s"
diff --git a/doc/ha-guide/source/controller-ha.rst b/doc/ha-guide/source/controller-ha.rst
deleted file mode 100644
index 7ba0d485..00000000
--- a/doc/ha-guide/source/controller-ha.rst
+++ /dev/null
@@ -1,20 +0,0 @@
-
-================================================
-Configuring the controller for high availability
-================================================
-
-The cloud controller runs on the management network
-and must talk to all other services.
-
-.. toctree::
-   :maxdepth: 2
-
-   controller-ha-pacemaker.rst
-   controller-ha-vip.rst
-   controller-ha-haproxy.rst
-   controller-ha-galera.rst
-   controller-ha-memcached.rst
-   controller-ha-rabbitmq.rst
-   controller-ha-keystone.rst
-   controller-ha-telemetry.rst
-
diff --git a/doc/ha-guide/source/figures/Cluster-deployment-collapsed.png b/doc/ha-guide/source/figures/Cluster-deployment-collapsed.png
deleted file mode 100644
index 91feec0b..00000000
Binary files a/doc/ha-guide/source/figures/Cluster-deployment-collapsed.png and /dev/null differ
diff --git a/doc/ha-guide/source/figures/Cluster-deployment-segregated.png b/doc/ha-guide/source/figures/Cluster-deployment-segregated.png
deleted file mode 100644
index a504ae18..00000000
Binary files a/doc/ha-guide/source/figures/Cluster-deployment-segregated.png and /dev/null differ
diff --git a/doc/ha-guide/source/figures/keepalived-arch.jpg b/doc/ha-guide/source/figures/keepalived-arch.jpg
deleted file mode 100644
index cb9558ee..00000000
Binary files a/doc/ha-guide/source/figures/keepalived-arch.jpg and /dev/null differ
diff --git a/doc/ha-guide/source/hardware-ha-basic.rst b/doc/ha-guide/source/hardware-ha-basic.rst
deleted file mode 100644
index 31669dc6..00000000
--- a/doc/ha-guide/source/hardware-ha-basic.rst
+++ /dev/null
@@ -1,47 +0,0 @@
-
-==============
-Hardware setup
-==============
-
-The standard hardware requirements apply:
-
-- `Provider networks `_
-- `Self-service networks `_
-
-However, OpenStack does not require a significant amount of resources,
-and the following minimum requirements should support
-a proof-of-concept high availability environment
-with core services and several instances:
-
-[TODO: Verify that these numbers are good]
-
-+-------------------+------------+----------+-----------+------+
-| Node type         | Processor  | Memory   | Storage   | NIC  |
-+===================+============+==========+===========+======+
-| controller node   | 1-2        | 8 GB     | 100 GB    | 2    |
-+-------------------+------------+----------+-----------+------+
-| compute node      | 2-4+       | 8+ GB    | 100+ GB   | 2    |
-+-------------------+------------+----------+-----------+------+
-
-
-For demonstration and study purposes,
-you can set up a test environment on virtual machines (VMs).
-This has the following benefits:
-
-- One physical server can support multiple nodes,
-  each of which supports almost any number of network interfaces.
-
-- You can take periodic "snapshots" throughout the installation process
-  and "roll back" to a working configuration in the event of a problem.
-
-However, running an OpenStack environment on VMs
-degrades the performance of your instances,
-particularly if your hypervisor or processor lacks support
-for hardware acceleration of nested VMs.
-
-.. note::
-
-   When installing highly-available OpenStack on VMs,
-   be sure that your hypervisor permits promiscuous mode
-   and disables MAC address filtering on the external network.
-
diff --git a/doc/ha-guide/source/hardware-ha.rst b/doc/ha-guide/source/hardware-ha.rst
deleted file mode 100644
index 91c03fc8..00000000
--- a/doc/ha-guide/source/hardware-ha.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-
-=============================================
-Hardware considerations for high availability
-=============================================
-
-[TODO: Provide a minimal architecture example for HA,
-expanded on that given in
-http://docs.openstack.org/liberty/install-guide-ubuntu/environment.html
-for easy comparison]
-
-
-.. toctree::
-   :maxdepth: 2
-
-   hardware-ha-basic.rst
diff --git a/doc/ha-guide/source/index.rst b/doc/ha-guide/source/index.rst
deleted file mode 100644
index e65f1250..00000000
--- a/doc/ha-guide/source/index.rst
+++ /dev/null
@@ -1,43 +0,0 @@
-=================================
-OpenStack High Availability Guide
-=================================
-
-Abstract
-~~~~~~~~
-
-This guide describes how to install and configure
-OpenStack for high availability.
-It supplements the OpenStack Installation Guides
-and assumes that you are familiar with the material in those guides.
-
-This guide documents the OpenStack Mitaka, OpenStack Liberty, and
-OpenStack Kilo releases.
-
-.. warning:: This guide is a work-in-progress and changing rapidly
-   while we continue to test and enhance the guidance. Please note
-   where there are open "to do" items and help where you are able.
-
-Contents
-~~~~~~~~
-
-.. toctree::
-   :maxdepth: 2
-
-   common/conventions.rst
-   intro-ha.rst
-   hardware-ha.rst
-   install-ha.rst
-   networking-ha.rst
-   controller-ha.rst
-   storage-ha.rst
-   compute-node-ha.rst
-   noncore-ha.rst
-
-
-   common/app_support.rst
-   common/glossary.rst
-
-Search in this guide
-~~~~~~~~~~~~~~~~~~~~
-
-* :ref:`search`
diff --git a/doc/ha-guide/source/install-ha-memcached.rst b/doc/ha-guide/source/install-ha-memcached.rst
deleted file mode 100644
index c9942c71..00000000
--- a/doc/ha-guide/source/install-ha-memcached.rst
+++ /dev/null
@@ -1,42 +0,0 @@
-
-=================
-Install memcached
-=================
-
-[TODO: Verify that Oslo supports hash synchronization;
-if so, this should not take more than load balancing.]
-
-[TODO: This hands off to two different docs for install information.
-We should choose one or explain the specific purpose of each.]
-
-Most OpenStack services can use memcached
-to store ephemeral data such as tokens.
-Although memcached does not support
-typical forms of redundancy such as clustering,
-OpenStack services can use almost any number of instances
-by configuring multiple hostnames or IP addresses.
-The memcached client implements hashing
-to balance objects among the instances.
-Failure of an instance impacts only a percentage of the objects,
-and the client automatically removes the failed instance
-from the list of available instances.
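-
-As a minimal sketch (the section and option names come from
-``oslo.cache``, which is introduced below; the host names are the
-controller examples used elsewhere in this guide), a service might be
-pointed at two memcached instances like this:
-
-.. code-block:: ini
-
-   [cache]
-   enabled = true
-   # Pooled memcached client from oslo.cache.
-   backend = oslo_cache.memcache_pool
-   memcache_servers = controller1:11211,controller2:11211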
-
-To install and configure memcached, read the
-`official documentation `_.
-
-Memory caching is managed by `oslo.cache
-`_,
-so the way to use multiple memcached servers is the same for all projects.
-
-[TODO: Should this show three hosts?]
-
-Example configuration with two hosts:
-
-::
-
-    memcached_servers = controller1:11211,controller2:11211
-
-By default, `controller1` handles the caching service. If the host
-goes down, `controller2` does the job.
-For more information about memcached installation,
-see the `OpenStack Administrator Guide
-`_.
diff --git a/doc/ha-guide/source/install-ha-ntp.rst b/doc/ha-guide/source/install-ha-ntp.rst
deleted file mode 100644
index c17eec9f..00000000
--- a/doc/ha-guide/source/install-ha-ntp.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-=============
-Configure NTP
-=============
-
-You must configure NTP to properly synchronize services among nodes.
-We recommend that you configure the controller node to reference
-more accurate (lower stratum) servers and other nodes to reference
-the controller node. For more information, see the
-`Install Guides `_.
diff --git a/doc/ha-guide/source/install-ha-os.rst b/doc/ha-guide/source/install-ha-os.rst
deleted file mode 100644
index 22a609c7..00000000
--- a/doc/ha-guide/source/install-ha-os.rst
+++ /dev/null
@@ -1,24 +0,0 @@
-=====================================
-Install operating system on each node
-=====================================
-
-The first step in setting up your highly-available OpenStack cluster
-is to install the operating system on each node.
-Follow the instructions in the OpenStack Installation Guides:
-
-- `CentOS and RHEL `_
-- `openSUSE and SUSE Linux Enterprise Server `_
-- `Ubuntu `_
-
-The OpenStack Installation Guides also include a list of the services
-that use passwords, with important notes about using them.
-
-This guide uses the following example IP addresses:
-
-.. code-block:: none
-
-   # controller
-   10.0.0.11 controller   # virtual IP
-   10.0.0.12 controller1
-   10.0.0.13 controller2
-   10.0.0.14 controller3
diff --git a/doc/ha-guide/source/install-ha.rst b/doc/ha-guide/source/install-ha.rst
deleted file mode 100644
index 7fcfdb6d..00000000
--- a/doc/ha-guide/source/install-ha.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-=====================================
-Installing high availability packages
-=====================================
-
-[TODO -- write intro to this section]
-
-.. toctree::
-   :maxdepth: 2
-
-   install-ha-os.rst
-   install-ha-memcached.rst
-   install-ha-ntp.rst
diff --git a/doc/ha-guide/source/intro-ha-arch-keepalived.rst b/doc/ha-guide/source/intro-ha-arch-keepalived.rst
deleted file mode 100644
index f1fca2c3..00000000
--- a/doc/ha-guide/source/intro-ha-arch-keepalived.rst
+++ /dev/null
@@ -1,96 +0,0 @@
-============================
-The keepalived architecture
-============================
-
-High availability strategies
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The following diagram shows a very simplified view of the different
-strategies used to achieve high availability for the OpenStack
-services:
-
-.. image:: /figures/keepalived-arch.jpg
-   :width: 100%
-
-Depending on the method used to communicate with the service, the
-following availability strategies are used:
-
-- Keepalived, for the HAProxy instances.
-- Access via an HAProxy virtual IP, for services such as HTTPd that
-  are accessed via a TCP socket that can be load balanced.
-- Built-in application clustering, when available from the application.
-  Galera is one example of this.
-- Starting up one instance of the service on several controller nodes,
-  when they can coexist and coordinate by other means. RPC in
-  ``nova-conductor`` is one example of this.
-- No high availability, when the service can only work in
-  active/passive mode.
-
-There are known issues with cinder-volume that make active/passive its
-recommended configuration for now; see:
-https://blueprints.launchpad.net/cinder/+spec/cinder-volume-active-active-support
-
-While there will be multiple neutron LBaaS agents running, each agent
-will manage a set of load balancers that cannot be failed over to
-another node.
-
-Architecture limitations
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-This architecture has some inherent limitations that should be kept in
-mind during deployment and daily operations.
-The following sections describe these limitations.
-
-#. Keepalived and network partitions
-
-   In the case of a network partition, there is a chance that two or
-   more nodes running keepalived claim to hold the same VIP, which may
-   lead to undesired behavior. Since keepalived uses VRRP over
-   multicast to elect a master (VIP owner), a network partition in
-   which keepalived nodes cannot communicate results in the VIPs
-   existing on two nodes. When the network partition is resolved, the
-   duplicate VIPs should also be resolved. Note that this network
-   partition problem with VRRP is a known limitation for this
-   architecture.
-
-#. Cinder-volume as a single point of failure
-
-   There are currently concerns over the cinder-volume service's
-   ability to run as a fully active-active service. During the Mitaka
-   timeframe, this is being worked on; see:
-   https://blueprints.launchpad.net/cinder/+spec/cinder-volume-active-active-support
-   Thus, cinder-volume only runs on one of the controller
-   nodes, even though it is configured on all nodes. In the case of a
-   failure of the node running cinder-volume, the service should be
-   started on a surviving controller node.
-
-#. Neutron-lbaas-agent as a single point of failure
-
-   The current design of the neutron LBaaS agent using the HAProxy
-   driver does not allow high availability for the tenant load
-   balancers. The neutron-lbaas-agent service will be enabled and
-   running on all controllers, allowing for load balancers to be
-   distributed across all nodes. However, a controller node failure
-   stops all load balancers running on that node until the service
-   is recovered or the load balancer is manually removed and created
-   again.
-
-#. Service monitoring and recovery required
-
-   An external service monitoring infrastructure is required to check
-   the OpenStack service health and notify operators in case of any
-   failure. This architecture does not provide any facility for that,
-   so it is necessary to integrate the OpenStack deployment with
-   any existing monitoring environment.
-
-#. Manual recovery after a full cluster restart
-
-   Some support services used by RDO or RHEL OSP use their own form of
-   application clustering. Usually, these services maintain a cluster
-   quorum that may be lost in the case of a simultaneous restart of all
-   cluster nodes, for example during a power outage. Each service
-   requires its own procedure to regain quorum.
-
-If you find any or all of these limitations concerning, you are
-encouraged to refer to the
-:doc:`Pacemaker HA architecture` instead.
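-
-For reference, a minimal ``keepalived.conf`` VRRP instance is sketched
-below. The interface name ``eth0``, the ``virtual_router_id``, and the
-priority are illustrative assumptions; the VIP matches the
-``10.0.0.11`` example used elsewhere in this guide:
-
-.. code-block:: none
-
-   vrrp_instance VI_1 {
-       state MASTER          # initial state; the VRRP election still applies
-       interface eth0        # interface carrying VRRP traffic
-       virtual_router_id 51
-       priority 101          # highest priority wins the VIP
-       advert_int 1          # advertisement interval in seconds
-       virtual_ipaddress {
-           10.0.0.11
-       }
-   }
-
-A peer node would carry the same block with ``state BACKUP`` and a
-lower priority, and take over the VIP when the master stops
-advertising.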
diff --git a/doc/ha-guide/source/intro-ha-arch-pacemaker.rst b/doc/ha-guide/source/intro-ha-arch-pacemaker.rst
deleted file mode 100644
index e81cc523..00000000
--- a/doc/ha-guide/source/intro-ha-arch-pacemaker.rst
+++ /dev/null
@@ -1,198 +0,0 @@
-==========================
-The Pacemaker architecture
-==========================
-
-What is a cluster manager
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-At its core, a cluster is a distributed finite state machine capable
-of co-ordinating the startup and recovery of inter-related services
-across a set of machines.
-
-Even a distributed and/or replicated application that is able to
-survive failures on one or more machines can benefit from a
-cluster manager:
-
-#. Awareness of other applications in the stack
-
-   While SYS-V init replacements like systemd can provide
-   deterministic recovery of a complex stack of services, the
-   recovery is limited to one machine and lacks the context of what
-   is happening on other machines - context that is crucial to
-   determine the difference between a local failure, a clean startup,
-   and recovery after a total site failure.
-
-#. Awareness of instances on other machines
-
-   Services like RabbitMQ and Galera have complicated boot-up
-   sequences that require co-ordination, and often serialization, of
-   startup operations across all machines in the cluster. This is
-   especially true after a site-wide failure or shutdown, where we must
-   first determine the last machine to be active.
-
-#. A shared implementation and calculation of `quorum
-   `_.
-
-   It is very important that all members of the system share the same
-   view of who their peers are and whether or not they are in the
-   majority. Failure to do this leads very quickly to an internal
-   `split-brain `_
-   state - where different parts of the system are pulling in
-   different and incompatible directions.
-
-#. Data integrity through fencing (a non-responsive process does not
-   imply it is not doing anything)
-
-   A single application does not have sufficient context to know the
-   difference between failure of a machine and failure of the
-   application on a machine. The usual practice is to assume the
-   machine is dead and carry on; however, this is highly risky - a
-   rogue process or machine could still be responding to requests and
-   generally causing havoc. The safer approach is to make use of
-   remotely accessible power switches and/or network switches and SAN
-   controllers to fence (isolate) the machine before continuing.
-
-#. Automated recovery of failed instances
-
-   While the application can still run after the failure of several
-   instances, it may not have sufficient capacity to serve the
-   required volume of requests. A cluster can automatically recover
-   failed instances to prevent additional load-induced failures.
-
-For these reasons, the use of a cluster manager like `Pacemaker
-`_ is highly recommended.
-
-Deployment flavors
-~~~~~~~~~~~~~~~~~~
-
-It is possible to deploy three different flavors of the Pacemaker
-architecture. The two extremes are **Collapsed** (where every
-component runs on every node) and **Segregated** (where every
-component runs in its own 3+ node cluster).
-
-Regardless of which flavor you choose, it is recommended that the
-clusters contain at least three nodes so that we can take advantage of
-`quorum `_.
-
-Quorum becomes important when a failure causes the cluster to split in
-two or more partitions. In this situation, you want the majority to
-ensure the minority are truly dead (through fencing) and continue to
-host resources. For a two-node cluster, no side has the majority, and
-you can end up in a situation where both sides fence each other, or
-both sides are running the same services - leading to data corruption.
-
-Clusters with an even number of hosts suffer from similar issues - a
-single network failure could easily cause an N:N split where neither
-side retains a majority. For this reason, we recommend an odd number
-of cluster members when scaling up.
-
-You can have up to 16 cluster members (this is currently limited by
-corosync's ability to scale higher). In extreme cases, 32 and
-even up to 64 nodes could be possible; however, this is not well tested.
-
-Collapsed
----------
-
-In this configuration, there is a single cluster of 3 or more
-nodes on which every component is running.
-
-This scenario has the advantage of requiring far fewer, if more
-powerful, machines. Additionally, being part of a single cluster
-allows us to accurately model the ordering dependencies between
-components.
-
-This scenario can be visualized as below.
-
-.. image:: /figures/Cluster-deployment-collapsed.png
-   :width: 100%
-
-You would choose this option if you prefer to have fewer but more
-powerful boxes.
-
-This is the most common option and the one we document here.
-
-Segregated
-----------
-
-In this configuration, each service runs in a dedicated cluster of
-3 or more nodes.
-
-The benefits of this approach are the physical isolation between
-components and the ability to add capacity to specific components.
-
-You would choose this option if you prefer to have more but
-less powerful boxes.
-
-This scenario can be visualized as below, where each box
-represents a cluster of three or more guests.
-
-.. image:: /figures/Cluster-deployment-segregated.png
-   :width: 100%
-
-Mixed
------
-
-It is also possible to follow a segregated approach for one or more
-components that are expected to be a bottleneck and use a collapsed
-approach for the remainder.
-
-
-Proxy server
-~~~~~~~~~~~~
-
-Almost all services in this stack benefit from being proxied.
-Using a proxy server provides:
-
-#. Load distribution
-
-   Many services can act in an active/active capacity; however, they
-   usually require an external mechanism for distributing requests to
-   one of the available instances. The proxy server can serve this
-   role.
-
-#. API isolation
-
-   By sending all API access through the proxy, we can clearly
-   identify service interdependencies. We can also move them to
-   locations other than ``localhost`` to increase capacity if the
-   need arises.
-
-#. Simplified process for adding/removing of nodes
-
-   Since all API access is directed to the proxy, adding or removing
-   nodes has no impact on the configuration of other services. This
-   can be very useful in upgrade scenarios where an entirely new set
-   of machines can be configured and tested in isolation before
-   telling the proxy to direct traffic there instead.
-
-#. Enhanced failure detection
-
-   The proxy can be configured as a secondary mechanism for detecting
-   service failures. It can even be configured to look for nodes in
-   a degraded state (such as being 'too far' behind in the
-   replication) and take them out of circulation.
-
-The following components are currently unable to benefit from the use
-of a proxy server:
-
-* RabbitMQ
-* Memcached
-* MongoDB
-
-However, the reasons vary and are discussed under each component's
-heading.
-
-We recommend HAProxy as the load balancer; however, there are many
-alternatives in the marketplace.
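-
-As a minimal sketch of such a front end for Galera (the
-``clustercheck``-style health-check listener on port ``9200`` and the
-back-end addresses are illustrative assumptions, not requirements), an
-HAProxy configuration might contain:
-
-.. code-block:: none
-
-   listen galera_cluster
-       bind 10.0.0.11:3306
-       # Health check against an HTTP listener reporting Galera sync state.
-       option httpchk
-       # Pin all traffic arriving at the VIP to a single back end.
-       stick-table type ip size 2
-       stick on dst
-       server controller1 10.0.0.12:3306 check port 9200 inter 1s
-       server controller2 10.0.0.13:3306 check port 9200 inter 1s backup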
-
-We use a check interval of 1 second; however, the timeouts vary by service.
-
-Generally, we use round-robin to distribute load amongst instances of
-active/active services; however, Galera uses the ``stick-table`` options
-to ensure that incoming connections to the virtual IP (VIP) are
-directed to only one of the available back ends.
-
-In Galera's case, although it can run active/active, this helps avoid
-lock contention and prevent deadlocks. It is used in combination with
-the ``httpchk`` option, which ensures that only nodes that are in sync
-with their peers are allowed to handle requests.
diff --git a/doc/ha-guide/source/intro-ha-compute.rst b/doc/ha-guide/source/intro-ha-compute.rst
deleted file mode 100644
index 76395d25..00000000
--- a/doc/ha-guide/source/intro-ha-compute.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-
-==========================================
-Overview of highly-available compute nodes
-==========================================
diff --git a/doc/ha-guide/source/intro-ha-concepts.rst b/doc/ha-guide/source/intro-ha-concepts.rst
deleted file mode 100644
index 3414dcfa..00000000
--- a/doc/ha-guide/source/intro-ha-concepts.rst
+++ /dev/null
@@ -1,213 +0,0 @@
-==========================
-High availability concepts
-==========================
-
-High availability systems seek to minimize two things:
-
-**System downtime**
-  Occurs when a user-facing service is unavailable
-  beyond a specified maximum amount of time.
-
-**Data loss**
-  Accidental deletion or destruction of data.
-
-Most high availability systems guarantee protection against system downtime
-and data loss only in the event of a single failure.
-However, they are also expected to protect against cascading failures,
-where a single failure deteriorates into a series of consequential failures.
-Many service providers guarantee a :term:`Service Level Agreement (SLA)`
-that includes an uptime percentage for the computing service, calculated
-from the available time and system downtime, excluding planned outage time.
-
-Redundancy and failover
-~~~~~~~~~~~~~~~~~~~~~~~
-
-High availability is implemented with redundant hardware
-running redundant instances of each service.
-If one piece of hardware running one instance of a service fails,
-the system can then fail over to use another instance of a service
-that is running on hardware that did not fail.
-
-A crucial aspect of high availability
-is the elimination of single points of failure (SPOFs).
-A SPOF is an individual piece of equipment or software
-that causes system downtime or data loss if it fails.
-In order to eliminate SPOFs, check that mechanisms exist for redundancy of:
-
-- Network components, such as switches and routers
-
-- Applications and automatic service migration
-
-- Storage components
-
-- Facility services such as power, air conditioning, and fire protection
-
-In the event that a component fails and a back-up system must take on
-its load, most high availability systems will replace the failed
-component as quickly as possible to maintain necessary redundancy. This
-way, time spent in a degraded protection state is minimized.
-
-Most high availability systems fail in the event of multiple
-independent (non-consequential) failures. In this case, most
-implementations favor protecting data over maintaining availability.
-
-High availability systems typically achieve an uptime percentage of
-99.99% or more, which roughly equates to less than an hour of
-cumulative downtime per year.
In order to achieve this, high -availability systems should keep recovery times after a failure to -about one to two minutes, sometimes significantly less. - -OpenStack currently meets such availability requirements for its own -infrastructure services, meaning that an uptime of 99.99% is feasible -for the OpenStack infrastructure proper. However, OpenStack does not -guarantee 99.99% availability for individual guest instances. - -This document discusses some common methods of implementing highly -available systems, with an emphasis on the core OpenStack services and -other open source services that are closely aligned with OpenStack. -These methods are by no means the only ways to do it; -you may supplement these services with commercial hardware and software -that provides additional features and functionality. -You also need to address high availability concerns -for any applications software that you run on your OpenStack environment. -The important thing is to make sure that your services are redundant -and available; how you achieve that is up to you. - -Stateless vs. stateful services -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Preventing single points of failure can depend on whether or not a -service is stateless. - -Stateless service - A service that provides a response after your request - and then requires no further attention. - To make a stateless service highly available, - you need to provide redundant instances and load balance them. - OpenStack services that are stateless include ``nova-api``, - ``nova-conductor``, ``glance-api``, ``keystone-api``, - ``neutron-api`` and ``nova-scheduler``. - -Stateful service - A service where subsequent requests to the service - depend on the results of the first request. - Stateful services are more difficult to manage because a single - action typically involves more than one request, so simply providing - additional instances and load balancing does not solve the problem. - For example, if the horizon user interface reset itself every time - you went to a new page, it would not be very useful. - OpenStack services that are stateful include the OpenStack database - and message queue. - Making stateful services highly available can depend on whether you choose - an active/passive or active/active configuration. - -Active/Passive vs. Active/Active -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Stateful services may be configured as active/passive or active/active: - -:term:`active/passive configuration` - Maintains a redundant instance - that can be brought online when the active service fails. - For example, OpenStack writes to the main database - while maintaining a disaster recovery database that can be brought online - if the main database fails. - - A typical active/passive installation for a stateful service maintains - a replacement resource that can be brought online when required. - Requests are handled using a :term:`virtual IP` address (VIP) that - facilitates returning to service with minimal reconfiguration. - A separate application (such as Pacemaker or Corosync) monitors - these services, bringing the backup online as necessary. - -:term:`active/active configuration` - Each service also has a backup but manages both the main and - redundant systems concurrently. - This way, if there is a failure, the user is unlikely to notice. - The backup system is already online and takes on increased load - while the main system is fixed and brought back online. 
-
-  Typically, an active/active installation for a stateless service
-  maintains a redundant instance, and requests are load balanced using
-  a virtual IP address and a load balancer such as HAProxy.
-
-  A typical active/active installation for a stateful service includes
-  redundant services, with all instances having an identical state. In
-  other words, updates to one instance of a database update all other
-  instances. This way a request to one instance is the same as a
-  request to any other. A load balancer manages the traffic to these
-  systems, ensuring that operational systems always handle the
-  request.
-
-Clusters and quorums
-~~~~~~~~~~~~~~~~~~~~
-
-The quorum specifies the minimum number of nodes
-that must be functional in a cluster of redundant nodes
-in order for the cluster to remain functional.
-When one node fails and failover transfers control to other nodes,
-the system must ensure that data and processes remain sane.
-To determine this, the contents of the remaining nodes are compared
-and, if there are discrepancies, a "majority rules" algorithm is implemented.
-
-For this reason, each cluster in a high availability environment should
-have an odd number of nodes, and the quorum is defined as more than half
-of the nodes.
-If multiple nodes fail so that the cluster size falls below the quorum
-value, the cluster itself fails.
-
-For example, in a seven-node cluster, the quorum should be set to
-floor(7/2) + 1 == 4. If quorum is four and four nodes fail simultaneously,
-the cluster itself would fail, whereas it would continue to function if
-no more than three nodes fail. If split into partitions of three and four
-nodes respectively, the four-node partition would retain quorum, continue
-to operate as the majority, and stop or fence the minority partition
-(depending on the no-quorum-policy cluster configuration).
-
-The quorum could also have been set to three, purely as a configuration
-example.
-
-.. note::
-
-   Note that setting the quorum to a value less than floor(n/2) + 1 is not
-   recommended and would likely cause a split-brain in the face of network
-   partitions.
-
-With a quorum of three, for the given example, the cluster would continue
-to function even when four nodes fail simultaneously. But if split into
-partitions of three and four nodes respectively, the quorum of three would
-make both sides attempt to fence the other and host resources. Without
-fencing enabled, the cluster would go straight to running two copies of
-each resource.
-
-This is why setting the quorum to a value less than floor(n/2) + 1 is
-dangerous. However, it may be required for some specific cases, such as a
-temporary measure at a point when it is known with 100% certainty that the
-other nodes are down.
-
-When configuring an OpenStack environment for study or demonstration purposes,
-it is possible to turn off the quorum checking;
-this is discussed later in this guide.
-Production systems should always run with quorum enabled.
-
-
-Single-controller high availability mode
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-OpenStack supports a single-controller high availability mode
-that is managed by the services that manage highly available environments,
-but it is not actually highly available because
-no redundant controllers are configured for failover.
-This environment can be used for study and demonstration
-but is not appropriate for a production environment.
-
-It is possible to add controllers to such an environment
-to convert it into a truly highly available environment.
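-
-As a minimal sketch of the setting involved (shown for ``pcs``; the
-``crmsh`` equivalent is ``crm configure property no-quorum-policy=ignore``),
-quorum checking can be relaxed in a study environment as follows:
-
-.. code-block:: console
-
-   # pcs property set no-quorum-policy=ignore
-
-Leave the default policy in place on production clusters.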
-
-
-High availability is not for every user; it presents some challenges.
-High availability may be too complex for databases or
-systems with large amounts of data, and replication can slow large systems
-down. Different setups have different prerequisites, so read the guidelines
-for each setup.
-
-High availability is turned off by default in OpenStack setups.
diff --git a/doc/ha-guide/source/intro-ha-controller.rst b/doc/ha-guide/source/intro-ha-controller.rst
deleted file mode 100644
index 26cf2391..00000000
--- a/doc/ha-guide/source/intro-ha-controller.rst
+++ /dev/null
@@ -1,62 +0,0 @@
-========================================
-Overview of highly-available controllers
-========================================
-
-OpenStack is a set of multiple services exposed to the end users
-as HTTP(S) APIs. Additionally, for its own internal usage, OpenStack
-requires an SQL database server and an AMQP broker. The physical servers
-on which all the components run are often called controllers.
-This modular OpenStack architecture allows you to duplicate all the
-components and run them on different controllers.
-By making all the components redundant, it is possible to make
-OpenStack highly available.
-
-In general, we can divide all the OpenStack components into three categories:
-
-- OpenStack APIs: these are stateless HTTP(S) services written in Python,
-  easy to duplicate and mostly easy to load balance.
-
-- The SQL relational database server provides stateful storage consumed
-  by the other components. Supported databases are MySQL, MariaDB, and
-  PostgreSQL. Making the SQL database redundant is complex.
-
-- :term:`Advanced Message Queuing Protocol (AMQP)` provides the
-  internal stateful communication service for OpenStack.
-
-Network components
-~~~~~~~~~~~~~~~~~~
-
-[TODO Need discussion of network hardware, bonding interfaces,
-intelligent Layer 2 switches, routers and Layer 3 switches.]
-
-The configuration uses static routing without
-Virtual Router Redundancy Protocol (VRRP)
-or similar techniques implemented.
-
-[TODO Need description of VIP failover inside Linux namespaces
-and expected SLA.]
-
-See [TODO link] for more information about configuring networking
-for high availability.
-
-Common deployment architectures
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There are primarily two HA architectures in use today.
-
-One uses a cluster manager such as Pacemaker or Veritas to co-ordinate
-the actions of the various services across a set of machines. Since
-we are focused on FOSS, we will refer to this as the Pacemaker
-architecture.
-
-The other is optimized for Active/Active services that do not require
-any inter-machine coordination. In this setup, services are started by
-your init system (systemd in most modern distributions) and a tool is
-used to move IP addresses between the hosts. The most common package
-for doing this is keepalived.
-
-.. toctree::
-   :maxdepth: 1
-
-   intro-ha-arch-pacemaker.rst
-   intro-ha-arch-keepalived.rst
diff --git a/doc/ha-guide/source/intro-ha-other.rst b/doc/ha-guide/source/intro-ha-other.rst
deleted file mode 100644
index e623ab38..00000000
--- a/doc/ha-guide/source/intro-ha-other.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-
-======================================
-High availability for other components
-======================================
diff --git a/doc/ha-guide/source/intro-ha-storage.rst b/doc/ha-guide/source/intro-ha-storage.rst
deleted file mode 100644
index f410fa56..00000000
--- a/doc/ha-guide/source/intro-ha-storage.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-=====================================
-Overview of high availability storage
-=====================================
-
-Making the Block Storage (cinder) API service highly available in
-active/active mode involves:
-
-* Configuring Block Storage to listen on the VIP address
-
-* Managing the Block Storage API daemon with the Pacemaker cluster manager
-
-* Configuring OpenStack services to use this IP address
diff --git a/doc/ha-guide/source/intro-ha.rst b/doc/ha-guide/source/intro-ha.rst
deleted file mode 100644
index dc4a5bdd..00000000
--- a/doc/ha-guide/source/intro-ha.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-
-===========================================
-Introduction to OpenStack high availability
-===========================================
-
-
-.. toctree::
-   :maxdepth: 2
-
-   intro-ha-concepts.rst
-   intro-ha-controller.rst
-   intro-ha-storage.rst
-   intro-ha-compute.rst
-   intro-ha-other.rst
-
diff --git a/doc/ha-guide/source/locale/ha-guide.pot b/doc/ha-guide/source/locale/ha-guide.pot
deleted file mode 100644
index cf7431e7..00000000
--- a/doc/ha-guide/source/locale/ha-guide.pot
+++ /dev/null
@@ -1,4261 +0,0 @@
-# SOME DESCRIPTIVE TITLE.
-# Copyright (C) 2015, OpenStack contributors
-# This file is distributed under the same license as the High Availability Guide package.
-# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: High Availability Guide 0.0.1\n"
-"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2016-03-07 06:00+0000\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-
-#: ../compute-node-ha-api.rst:4
-msgid "Configure high availability on compute nodes"
-msgstr ""
-
-#: ../compute-node-ha-api.rst:6
-msgid ""
-"The `Installation Guide `_ gives instructions for installing multiple compute nodes. To make "
-"them highly available, you must configure the environment to include "
-"multiple instances of the API and other services."
-msgstr ""
-
-#: ../compute-node-ha.rst:4
-msgid "Configuring the compute node for high availability"
-msgstr ""
-
-#: ../controller-ha-galera-config.rst:2
-msgid "Configuration"
-msgstr ""
-
-#: ../controller-ha-galera-config.rst:4
-msgid ""
-"Before you launch Galera Cluster, you need to configure the server and the "
-"database to operate as part of the cluster."
-msgstr ""
-
-#: ../controller-ha-galera-config.rst:8
-msgid "Configuring the server"
-msgstr ""
-
-#: ../controller-ha-galera-config.rst:10
-msgid ""
-"Certain services running on the underlying operating system of your "
-"OpenStack database may block Galera Cluster from normal operation or prevent "
-"``mysqld`` from achieving network connectivity with the cluster."
-msgstr "" - -#: ../controller-ha-galera-config.rst:16 -msgid "Firewall" -msgstr "" - -#: ../controller-ha-galera-config.rst:18 -msgid "Galera Cluster requires that you open four ports to network traffic:" -msgstr "" - -#: ../controller-ha-galera-config.rst:20 -msgid "" -"On ``3306``, Galera Cluster uses TCP for database client connections and " -"State Snapshot Transfers methods that require the client, (that is, " -"``mysqldump``)." -msgstr "" - -#: ../controller-ha-galera-config.rst:23 -msgid "" -"On ``4567`` Galera Cluster uses TCP for replication traffic. Multicast " -"replication uses both TCP and UDP on this port." -msgstr "" - -#: ../controller-ha-galera-config.rst:25 -msgid "On ``4568`` Galera Cluster uses TCP for Incremental State Transfers." -msgstr "" - -#: ../controller-ha-galera-config.rst:26 -msgid "" -"On ``4444`` Galera Cluster uses TCP for all other State Snapshot Transfer " -"methods." -msgstr "" - -#: ../controller-ha-galera-config.rst:29 -msgid "" -"For more information on firewalls, see `Firewalls and default ports `_, in the Configuration Reference." -msgstr "" - -#: ../controller-ha-galera-config.rst:35 -msgid "``iptables``" -msgstr "" - -#: ../controller-ha-galera-config.rst:37 -msgid "" -"For many Linux distributions, you can configure the firewall using the " -"``iptables`` utility. To do so, complete the following steps:" -msgstr "" - -#: ../controller-ha-galera-config.rst:40 -msgid "" -"For each cluster node, run the following commands, replacing ``NODE-IP-" -"ADDRESS`` with the IP address of the cluster node you want to open the " -"firewall to:" -msgstr "" - -#: ../controller-ha-galera-config.rst:59 -msgid "" -"In the event that you also want to configure multicast replication, run this " -"command as well:" -msgstr "" - -#: ../controller-ha-galera-config.rst:69 -msgid "" -"Make the changes persistent. For servers that use ``init``, use the :command:" -"`save` command:" -msgstr "" - -#: ../controller-ha-galera-config.rst:76 -msgid "" -"For servers that use ``systemd``, you need to save the current packet " -"filtering to the path of the file that ``iptables`` reads when it starts. " -"This path can vary by distribution, but common locations are in the ``/etc`` " -"directory, such as:" -msgstr "" - -#: ../controller-ha-galera-config.rst:81 -msgid "``/etc/sysconfig/iptables``" -msgstr "" - -#: ../controller-ha-galera-config.rst:82 -msgid "``/etc/iptables/iptables.rules``" -msgstr "" - -#: ../controller-ha-galera-config.rst:84 -msgid "" -"When you find the correct path, run the :command:`iptables-save` command:" -msgstr "" - -#: ../controller-ha-galera-config.rst:90 -#: ../controller-ha-galera-config.rst:137 -msgid "" -"With the firewall configuration saved, whenever your OpenStack database " -"starts." -msgstr "" - -#: ../controller-ha-galera-config.rst:94 -msgid "``firewall-cmd``" -msgstr "" - -#: ../controller-ha-galera-config.rst:96 -msgid "" -"For many Linux distributions, you can configure the firewall using the " -"``firewall-cmd`` utility for FirewallD. 
To do so, complete the following " -"steps on each cluster node:" -msgstr "" - -#: ../controller-ha-galera-config.rst:100 -msgid "Add the Galera Cluster service:" -msgstr "" - -#: ../controller-ha-galera-config.rst:106 -msgid "" -"For each instance of OpenStack database in your cluster, run the following " -"commands, replacing ``NODE-IP-ADDRESS`` with the IP address of the cluster " -"node you want to open the firewall to:" -msgstr "" - -#: ../controller-ha-galera-config.rst:117 -msgid "" -"In the event that you also want to configure mutlicast replication, run this " -"command as well:" -msgstr "" - -#: ../controller-ha-galera-config.rst:124 -msgid "" -"To make this configuration persistent, repeat the above commands with the :" -"option:`--permanent` option." -msgstr "" - -#: ../controller-ha-galera-config.rst:141 -msgid "SELinux" -msgstr "" - -#: ../controller-ha-galera-config.rst:143 -msgid "" -"Security-Enhanced Linux is a kernel module for improving security on Linux " -"operating systems. It is commonly enabled and configured by default on Red " -"Hat-based distributions. In the context of Galera Cluster, systems with " -"SELinux may block the database service, keep it from starting or prevent it " -"from establishing network connections with the cluster." -msgstr "" - -#: ../controller-ha-galera-config.rst:149 -msgid "" -"To configure SELinux to permit Galera Cluster to operate, complete the " -"following steps on each cluster node:" -msgstr "" - -#: ../controller-ha-galera-config.rst:152 -msgid "Using the ``semanage`` utility, open the relevant ports:" -msgstr "" - -#: ../controller-ha-galera-config.rst:161 -msgid "" -"In the event that you use multicast replication, you also need to open " -"``4567`` to UDP traffic:" -msgstr "" - -#: ../controller-ha-galera-config.rst:168 -msgid "Set SELinux to allow the database server to run:" -msgstr "" - -#: ../controller-ha-galera-config.rst:174 -msgid "With these options set, SELinux now permits Galera Cluster to operate." -msgstr "" - -#: ../controller-ha-galera-config.rst:176 -msgid "" -"Bear in mind, leaving SELinux in permissive mode is not a good security " -"practice. Over the longer term, you need to develop a security policy for " -"Galera Cluster and then switch SELinux back into enforcing mode." -msgstr "" - -#: ../controller-ha-galera-config.rst:181 -msgid "" -"For more information on configuring SELinux to work with Galera Cluster, see " -"the `Documentation `_" -msgstr "" - -#: ../controller-ha-galera-config.rst:187 -msgid "AppArmor" -msgstr "" - -#: ../controller-ha-galera-config.rst:189 -msgid "" -"Application Armor is a kernel module for improving security on Linux " -"operating systems. It is developed by Canonical and commonly used on Ubuntu-" -"based distributions. In the context of Galera Cluster, systems with AppArmor " -"may block the database service from operating normally." -msgstr "" - -#: ../controller-ha-galera-config.rst:194 -msgid "" -"To configure AppArmor to work with Galera Cluster, complete the following " -"steps on each cluster node:" -msgstr "" - -#: ../controller-ha-galera-config.rst:197 -msgid "" -"Create a symbolic link for the database server in the ``disable`` directory:" -msgstr "" - -#: ../controller-ha-galera-config.rst:203 -msgid "" -"Restart AppArmor. 
For servers that use ``init``, run the following command:" -msgstr "" - -# #-#-#-#-# controller-ha-galera-config.pot (High Availability Guide 0.0.1) #-#-#-#-# -# #-#-#-#-# controller-ha-galera-manage.pot (High Availability Guide 0.0.1) #-#-#-#-# -#: ../controller-ha-galera-config.rst:209 -#: ../controller-ha-galera-manage.rst:43 ../controller-ha-galera-manage.rst:70 -msgid "For servers that use ``systemd``, instead run this command:" -msgstr "" - -#: ../controller-ha-galera-config.rst:215 -msgid "AppArmor now permits Galera Cluster to operate." -msgstr "" - -#: ../controller-ha-galera-config.rst:219 -msgid "Database configuration" -msgstr "" - -#: ../controller-ha-galera-config.rst:221 -msgid "" -"MySQL databases, including MariaDB and Percona XtraDB, manage their " -"configurations using a ``my.cnf`` file, which is typically located in the ``/" -"etc`` directory. Configuration options available in these databases are also " -"available in Galera Cluster, with some restrictions and several additions." -msgstr "" - -#: ../controller-ha-galera-config.rst:252 -msgid "Configuring ``mysqld``" -msgstr "" - -#: ../controller-ha-galera-config.rst:254 -msgid "" -"While all of the configuration parameters available to the standard MySQL, " -"MariaDB or Percona XtraDB database server are available in Galera Cluster, " -"there are some that you must define an outset to avoid conflict or " -"unexpected behavior." -msgstr "" - -#: ../controller-ha-galera-config.rst:259 -msgid "" -"Ensure that the database server is not bound only to to the localhost, " -"``127.0.0.1``. Instead, bind it to ``0.0.0.0`` to ensure it listens on all " -"available interfaces." -msgstr "" - -#: ../controller-ha-galera-config.rst:267 -msgid "" -"Ensure that the binary log format is set to use row-level replication, as " -"opposed to statement-level replication:" -msgstr "" - -#: ../controller-ha-galera-config.rst:276 -msgid "Configuring InnoDB" -msgstr "" - -#: ../controller-ha-galera-config.rst:278 -msgid "" -"Galera Cluster does not support non-transactional storage engines and " -"requires that you use InnoDB by default. There are some additional " -"parameters that you must define to avoid conflicts." -msgstr "" - -#: ../controller-ha-galera-config.rst:282 -msgid "Ensure that the default storage engine is set to InnoDB:" -msgstr "" - -#: ../controller-ha-galera-config.rst:288 -msgid "" -"Ensure that the InnoDB locking mode for generating auto-increment values is " -"set to ``2``, which is the interleaved locking mode." -msgstr "" - -#: ../controller-ha-galera-config.rst:295 -msgid "" -"Do not change this value. Other modes may cause ``INSERT`` statements on " -"tables with auto-increment columns to fail as well as unresolved deadlocks " -"that leave the system unresponsive." -msgstr "" - -#: ../controller-ha-galera-config.rst:299 -msgid "" -"Ensure that the InnoDB log buffer is written to file once per second, rather " -"than on each commit, to improve performance:" -msgstr "" - -#: ../controller-ha-galera-config.rst:306 -msgid "" -"Bear in mind, while setting this parameter to ``1`` or ``2`` can improve " -"performance, it introduces certain dangers. Operating system failures can " -"erase the last second of transactions. While you can recover this data from " -"another node, if the cluster goes down at the same time (in the event of a " -"data center power outage), you lose this data permanently." -msgstr "" - -#: ../controller-ha-galera-config.rst:312 -msgid "" -"Define the InnoDB memory buffer pool size. 
The default value is 128 MB, but " -"to compensate for Galera Cluster's additional memory usage, scale your usual " -"value back by 5%:" -msgstr "" - -#: ../controller-ha-galera-config.rst:322 -msgid "Configuring wsrep replication" -msgstr "" - -#: ../controller-ha-galera-config.rst:324 -msgid "" -"Galera Cluster configuration parameters all have the ``wsrep_`` prefix. " -"There are five that you must define for each cluster node in your OpenStack " -"database." -msgstr "" - -#: ../controller-ha-galera-config.rst:328 -msgid "" -"**wsrep Provider** The Galera Replication Plugin serves as the wsrep " -"Provider for Galera Cluster. It is installed on your system as the " -"``libgalera_smm.so`` file. You must define the path to this file in your " -"``my.cnf``." -msgstr "" - -#: ../controller-ha-galera-config.rst:337 -msgid "**Cluster Name** Define an arbitrary name for your cluster." -msgstr "" - -#: ../controller-ha-galera-config.rst:343 -msgid "" -"You must use the same name on every cluster node. The connection fails when " -"this value does not match." -msgstr "" - -#: ../controller-ha-galera-config.rst:346 -msgid "**Cluster Address** List the IP addresses for each cluster node." -msgstr "" - -#: ../controller-ha-galera-config.rst:352 -msgid "" -"Replace the IP addresses given here with comma-separated list of each " -"OpenStack database in your cluster." -msgstr "" - -#: ../controller-ha-galera-config.rst:355 -msgid "**Node Name** Define the logical name of the cluster node." -msgstr "" - -#: ../controller-ha-galera-config.rst:361 -msgid "**Node Address** Define the IP address of the cluster node." -msgstr "" - -#: ../controller-ha-galera-config.rst:371 -msgid "Additional parameters" -msgstr "" - -#: ../controller-ha-galera-config.rst:373 -msgid "" -"For a complete list of the available parameters, run the ``SHOW VARIABLES`` " -"command from within the database client:" -msgstr "" - -#: ../controller-ha-galera-config.rst:394 -msgid "" -"For the documentation of these parameters, wsrep Provider option and status " -"variables available in Galera Cluster, see `Reference `_." -msgstr "" - -#: ../controller-ha-galera-install.rst:2 -msgid "Installation" -msgstr "" - -#: ../controller-ha-galera-install.rst:4 -msgid "" -"Using Galera Cluster requires that you install two packages. The first is " -"the database server, which must include the wsrep API patch. The second " -"package is the Galera Replication Plugin, which enables the write-set " -"replication service functionality with the database server." -msgstr "" - -#: ../controller-ha-galera-install.rst:9 -msgid "" -"There are three implementations of Galera Cluster: MySQL, MariaDB and " -"Percona XtraDB. For each implementation, there is a software repository that " -"provides binary packages for Debian, Red Hat, and SUSE-based Linux " -"distributions." -msgstr "" - -#: ../controller-ha-galera-install.rst:16 -msgid "Enabling the repository" -msgstr "" - -#: ../controller-ha-galera-install.rst:18 -msgid "" -"Galera Cluster is not available in the base repositories of Linux " -"distributions. In order to install it with your package manage, you must " -"first enable the repository on your system. The particular methods for doing " -"so vary depending on which distribution you use for OpenStack and which " -"database server you want to use." 
-msgstr "" - -#: ../controller-ha-galera-install.rst:25 -msgid "Debian" -msgstr "" - -#: ../controller-ha-galera-install.rst:27 -msgid "" -"For Debian and Debian-based distributions, such as Ubuntu, complete the " -"following steps:" -msgstr "" - -#: ../controller-ha-galera-install.rst:30 -msgid "Add the GnuPG key for the database repository that you want to use." -msgstr "" - -#: ../controller-ha-galera-install.rst:37 -msgid "" -"Note that the particular key value in this command varies depending on which " -"database software repository you want to use." -msgstr "" - -#: ../controller-ha-galera-install.rst:41 -msgid "Database" -msgstr "" - -#: ../controller-ha-galera-install.rst:41 -msgid "Key" -msgstr "" - -#: ../controller-ha-galera-install.rst:43 -msgid "Galera Cluster for MySQL" -msgstr "" - -#: ../controller-ha-galera-install.rst:43 -msgid "``BC19DDBA``" -msgstr "" - -#: ../controller-ha-galera-install.rst:45 -msgid "MariaDB Galera Cluster" -msgstr "" - -#: ../controller-ha-galera-install.rst:45 -msgid "``0xcbcb082a1bb943db``" -msgstr "" - -#: ../controller-ha-galera-install.rst:47 -msgid "Percona XtraDB Cluster" -msgstr "" - -#: ../controller-ha-galera-install.rst:47 -msgid "``1C4CBDCDCD2EFD2A``" -msgstr "" - -#: ../controller-ha-galera-install.rst:50 -msgid "" -"Add the repository to your sources list. Using your preferred text editor, " -"create a ``galera.list`` file in the ``/etc/apt/sources.list.d/`` directory. " -"For the contents of this file, use the lines that pertain to the software " -"repository you want to install:" -msgstr "" - -#: ../controller-ha-galera-install.rst:66 -msgid "" -"For each entry: Replace all instances of ``DISTRO`` with the distribution " -"that you use, such as ``debian`` or ``ubuntu``. Replace all instances of " -"``RELEASE`` with the release of that distribution, such as ``wheezy`` or " -"``trusty``. Replace all instances of ``VERSION`` with the version of the " -"database server that you want to install, such as ``5.6`` or ``10.0``." -msgstr "" - -#: ../controller-ha-galera-install.rst:72 -msgid "" -"In the event that you do not know the release code-name for your " -"distribution, you can use the following command to find it out:" -msgstr "" - -#: ../controller-ha-galera-install.rst:81 -msgid "Update the local cache." -msgstr "" - -#: ../controller-ha-galera-install.rst:87 -msgid "" -"Packages in the Galera Cluster Debian repository are now available for " -"installation on your system." -msgstr "" - -#: ../controller-ha-galera-install.rst:91 -msgid "Red Hat" -msgstr "" - -#: ../controller-ha-galera-install.rst:93 -msgid "" -"For Red Hat Enterprise Linux and Red Hat-based Linux distributions, the " -"process is more straightforward. In this file, only enter the text for the " -"repository you want to use." -msgstr "" - -#: ../controller-ha-galera-install.rst:97 -msgid "" -"For Galera Cluster for MySQL, using your preferred text editor, create a " -"``Galera.repo`` file in the ``/etc/yum.repos.d/`` directory." -msgstr "" - -#: ../controller-ha-galera-install.rst:108 -msgid "" -"Replace ``DISTRO`` with the name of the distribution you use, such as " -"``centos`` or ``fedora``. Replace ``RELEASE`` with the release number, such " -"as ``7`` for CentOS 7. Replace ``ARCH`` with your system architecture, such " -"as ``x86_64``" -msgstr "" - -#: ../controller-ha-galera-install.rst:113 -msgid "" -"For MariaDB Galera Cluster, using your preferred text editor, create a " -"``Galera.repo`` file in the ``/etc/yum.repos.d/`` directory." 
-msgstr "" - -#: ../controller-ha-galera-install.rst:124 -msgid "" -"Replace ``VERSION`` with the version of MariaDB you want to install, such as " -"``5.6`` or ``10.0``. Replace ``PACKAGE`` with the package type and " -"architecture, such as ``rhel6-amd64`` for Red Hat 6 on 64-bit architecture." -msgstr "" - -#: ../controller-ha-galera-install.rst:129 -msgid "For Percona XtraDB Cluster, run the following command:" -msgstr "" - -#: ../controller-ha-galera-install.rst:135 -msgid "" -"Bear in mind that the Percona repository only supports Red Hat Enterprise " -"Linux and CentOS distributions." -msgstr "" - -#: ../controller-ha-galera-install.rst:138 -msgid "" -"Packages in the Galera Cluster Red Hat repository are not available for " -"installation on your system." -msgstr "" - -#: ../controller-ha-galera-install.rst:144 -msgid "SUSE" -msgstr "" - -#: ../controller-ha-galera-install.rst:146 -msgid "" -"For SUSE Enterprise Linux and SUSE-based distributions, such as openSUSE " -"binary installations are only available for Galera Cluster for MySQL and " -"MariaDB Galera Cluster." -msgstr "" - -#: ../controller-ha-galera-install.rst:150 -msgid "" -"Create a ``Galera.repo`` file in the local directory. For Galera Cluster for " -"MySQL, use the following content:" -msgstr "" - -#: ../controller-ha-galera-install.rst:161 -msgid "" -"In the text: Replace ``DISTRO`` with the name of the distribution you use, " -"such as ``sles`` or ``opensuse``. Replace ``RELEASE`` with the version " -"number of that distribution." -msgstr "" - -#: ../controller-ha-galera-install.rst:165 -msgid "For MariaDB Galera Cluster, instead use this content:" -msgstr "" - -#: ../controller-ha-galera-install.rst:175 -msgid "" -"In the text: Replace ``VERSION`` with the version of MariaDB you want to " -"install, such as ``5.6`` or ``10.0``. Replace package with the package " -"architecture you want to use, such as ``opensuse13-amd64``." -msgstr "" - -#: ../controller-ha-galera-install.rst:179 -msgid "Add the repository to your system:" -msgstr "" - -#: ../controller-ha-galera-install.rst:185 -msgid "Refresh ``zypper``:" -msgstr "" - -#: ../controller-ha-galera-install.rst:191 -msgid "" -"Packages in the Galera Cluster SUSE repository are now available for " -"installation." -msgstr "" - -#: ../controller-ha-galera-install.rst:196 -msgid "Installing Galera Cluster" -msgstr "" - -#: ../controller-ha-galera-install.rst:198 -msgid "" -"When you finish enabling the software repository for Galera Cluster, you can " -"install it using your package manager. 
The particular command and packages "
-"you need to install vary depending on which database server you want to "
-"install and which Linux distribution you use:"
-msgstr ""
-
-#: ../controller-ha-galera-install.rst:203
-msgid "Galera Cluster for MySQL:"
-msgstr ""
-
-#: ../controller-ha-galera-install.rst:206
-#: ../controller-ha-galera-install.rst:230
-#: ../controller-ha-galera-install.rst:255
-msgid ""
-"For Debian and Debian-based distributions, such as Ubuntu, run the following "
-"command:"
-msgstr ""
-
-#: ../controller-ha-galera-install.rst:213
-#: ../controller-ha-galera-install.rst:237
-#: ../controller-ha-galera-install.rst:262
-msgid ""
-"For Red Hat Enterprise Linux and Red Hat-based distributions, such as Fedora "
-"or CentOS, instead run this command:"
-msgstr ""
-
-#: ../controller-ha-galera-install.rst:220
-#: ../controller-ha-galera-install.rst:244
-msgid ""
-"For SUSE Enterprise Linux Server and SUSE-based distributions, such as "
-"openSUSE, instead run this command:"
-msgstr ""
-
-#: ../controller-ha-galera-install.rst:228
-msgid "MariaDB Galera Cluster:"
-msgstr ""
-
-#: ../controller-ha-galera-install.rst:252
-msgid "Percona XtraDB Cluster:"
-msgstr ""
-
-#: ../controller-ha-galera-install.rst:269
-msgid ""
-"Galera Cluster is now installed on your system. You must repeat this process "
-"for each controller node in your cluster."
-msgstr ""
-
-#: ../controller-ha-galera-install.rst:272
-msgid ""
-"In the event that you already installed the standalone version of MySQL, "
-"MariaDB, or Percona XtraDB, this installation purges all privileges on your "
-"OpenStack database server. You must reapply the privileges listed in the "
-"installation guide."
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:2
-msgid "Management"
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:4
-msgid ""
-"When you finish the installation and configuration process on each cluster "
-"node in your OpenStack database cluster, you can initialize Galera Cluster."
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:7
-msgid "Before you attempt this, verify that you have the following ready:"
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:9
-msgid ""
-"Database hosts with Galera Cluster installed. You need a minimum of three "
-"hosts;"
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:11
-msgid "No firewalls between the hosts;"
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:12
-msgid "SELinux and AppArmor set to permit access to ``mysqld``;"
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:13
-msgid ""
-"The correct path to ``libgalera_smm.so`` given to the ``wsrep_provider`` "
-"parameter."
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:17
-msgid "Initializing the cluster"
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:19
-msgid ""
-"In Galera Cluster, the Primary Component is the cluster of database servers "
-"that replicate into each other. In the event that a cluster node loses "
-"connectivity with the Primary Component, it defaults to a non-operational "
-"state, to avoid creating or serving inconsistent data."
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:25
-msgid ""
-"By default, cluster nodes do not start as part of a Primary Component. "
-"Instead, they assume that one exists somewhere and attempt to establish a "
-"connection with it. To create a Primary Component, you must start one "
-"cluster node using the ``--wsrep-new-cluster`` option. You can do this using "
-"any cluster node; it is not important which you choose. 
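As a concrete sketch, on an ``init``-based server the first node might be started with the ``--wsrep-new-cluster`` option like this (on systemd-based distributions the ``galera_new_cluster`` helper typically serves the same purpose; the service name ``mysql`` is an assumption that depends on your chosen Galera implementation):

.. code-block:: console

   # service mysql start --wsrep-new-cluster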
In the Primary "
-"Component, replication and state transfers bring all databases to the same "
-"state."
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:34
-msgid "To start the cluster, complete the following steps:"
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:36
-msgid ""
-"Initialize the Primary Component on one cluster node. For servers that use "
-"``init``, run the following command:"
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:49
-msgid ""
-"Once the database server starts, check the cluster status using the "
-"``wsrep_cluster_size`` status variable. From the database client, run the "
-"following command:"
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:63
-msgid ""
-"Start the database server on all other cluster nodes. For servers that use "
-"``init``, run the following command:"
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:76
-msgid ""
-"When you have all cluster nodes started, log into the database client on one "
-"of them and check the ``wsrep_cluster_size`` status variable again."
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:90
-msgid ""
-"When each cluster node starts, it checks the IP addresses given to the "
-"``wsrep_cluster_address`` parameter and attempts to establish network "
-"connectivity with a database server running there. Once it establishes a "
-"connection, it attempts to join the Primary Component, requesting a state "
-"transfer as needed to bring itself into sync with the cluster."
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:97
-msgid ""
-"In the event that you need to restart any cluster node, you can do so. When "
-"the database server comes back up, it establishes connectivity with the "
-"Primary Component and catches up on any changes it may have missed while "
-"down."
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:104
-msgid "Restarting the cluster"
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:106
-msgid ""
-"Individual cluster nodes can be stopped and restarted without issue. When a "
-"database loses its connection or restarts, Galera Cluster brings it back "
-"into sync once it reestablishes its connection with the Primary Component. "
-"In the event that you need to restart the entire cluster, identify the most "
-"advanced cluster node and initialize the Primary Component on that node."
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:113
-msgid ""
-"To find the most advanced cluster node, you need to check the sequence "
-"numbers, or seqnos, on the last committed transaction for each. You can find "
-"this by viewing the ``grastate.dat`` file in the database directory:"
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:127
-msgid ""
-"Alternatively, if the database server is running, use the "
-"``wsrep_last_committed`` status variable:"
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:140
-msgid ""
-"This value increments with each transaction, so the most advanced node has "
-"the highest sequence number, and therefore is the most up to date."
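A sketch of both checks follows; the data directory path assumes a default MySQL layout, and the ``uuid`` and ``seqno`` values shown are placeholders, not real output:

.. code-block:: console

   $ cat /var/lib/mysql/grastate.dat
   # GALERA saved state
   version: 2.1
   uuid:    5ee99582-bb8d-11e2-b8e3-23de375c1d30
   seqno:   8204503945773

   $ mysql -e "SHOW STATUS LIKE 'wsrep_last_committed';"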
-msgstr "" - -#: ../controller-ha-galera-manage.rst:145 -msgid "Configuration tips" -msgstr "" - -#: ../controller-ha-galera-manage.rst:149 -msgid "Deployment strategies" -msgstr "" - -#: ../controller-ha-galera-manage.rst:151 -msgid "Galera can be configured using one of the following strategies:" -msgstr "" - -#: ../controller-ha-galera-manage.rst:154 -msgid "Each instance has its own IP address;" -msgstr "" - -#: ../controller-ha-galera-manage.rst:156 -msgid "" -"OpenStack services are configured with the list of these IP addresses so " -"they can select one of the addresses from those available." -msgstr "" - -#: ../controller-ha-galera-manage.rst:160 -msgid "Galera runs behind HAProxy." -msgstr "" - -#: ../controller-ha-galera-manage.rst:162 -msgid "" -"HAProxy load balances incoming requests and exposes just one IP address for " -"all the clients." -msgstr "" - -#: ../controller-ha-galera-manage.rst:165 -msgid "" -"Galera synchronous replication guarantees a zero slave lag. The failover " -"procedure completes once HAProxy detects that the active back end has gone " -"down and switches to the backup one, which is then marked as 'UP'. If no " -"back ends are up (in other words, the Galera cluster is not ready to accept " -"connections), the failover procedure finishes only when the Galera cluster " -"has been successfully reassembled. The SLA is normally no more than 5 " -"minutes." -msgstr "" - -#: ../controller-ha-galera-manage.rst:174 -msgid "" -"Use MySQL/Galera in active/passive mode to avoid deadlocks on ``SELECT ... " -"FOR UPDATE`` type queries (used, for example, by nova and neutron). This " -"issue is discussed more in the following:" -msgstr "" - -#: ../controller-ha-galera-manage.rst:178 -msgid "http://lists.openstack.org/pipermail/openstack-dev/2014-May/035264.html" -msgstr "" - -#: ../controller-ha-galera-manage.rst:179 -msgid "http://www.joinfu.com/" -msgstr "" - -#: ../controller-ha-galera-manage.rst:181 -msgid "" -"Of these options, the second one is highly recommended. Although Galera " -"supports active/active configurations, we recommend active/passive (enforced " -"by the load balancer) in order to avoid lock contention." -msgstr "" - -#: ../controller-ha-galera-manage.rst:188 -msgid "Configuring HAProxy" -msgstr "" - -#: ../controller-ha-galera-manage.rst:190 -msgid "" -"If you use HAProxy for load-balancing client access to Galera Cluster as " -"described in the :doc:`controller-ha-haproxy`, you can use the " -"``clustercheck`` utility to improve health checks." -msgstr "" - -#: ../controller-ha-galera-manage.rst:194 -msgid "" -"Create a configuration file for ``clustercheck`` at ``/etc/sysconfig/" -"clustercheck``:" -msgstr "" - -#: ../controller-ha-galera-manage.rst:204 -msgid "" -"Log in to the database client and grant the ``clustercheck`` user " -"``PROCESS`` privileges." -msgstr "" - -#: ../controller-ha-galera-manage.rst:214 -msgid "" -"You only need to do this on one cluster node. Galera Cluster replicates the " -"user to all the others." -msgstr "" - -#: ../controller-ha-galera-manage.rst:217 -msgid "" -"Create a configuration file for the HAProxy monitor service, at ``/etc/" -"xinetd.d/galera-monitor``:" -msgstr "" - -#: ../controller-ha-galera-manage.rst:239 -msgid "" -"Start the ``xinetd`` daemon for ``clustercheck``. 
For servers that use "
-"``init``, run the following commands:"
-msgstr ""
-
-#: ../controller-ha-galera-manage.rst:247
-msgid "For servers that use ``systemd``, instead run these commands:"
-msgstr ""
-
-#: ../controller-ha-galera.rst:2
-msgid "Database (Galera Cluster)"
-msgstr ""
-
-#: ../controller-ha-galera.rst:4
-msgid ""
-"The first step is to install the database that sits at the heart of the "
-"cluster. To implement high availability, run an instance of the database on "
-"each controller node and use Galera Cluster to provide replication between "
-"them. Galera Cluster is a synchronous multi-master database cluster, based "
-"on MySQL and the InnoDB storage engine. It is a high-availability service "
-"that provides high system uptime, no data loss, and scalability for growth."
-msgstr ""
-
-#: ../controller-ha-galera.rst:11
-msgid ""
-"You can achieve high availability for the OpenStack database in many "
-"different ways, depending on the type of database that you want to use. "
-"There are three implementations of Galera Cluster available to you:"
-msgstr ""
-
-#: ../controller-ha-galera.rst:15
-msgid ""
-"`Galera Cluster for MySQL `_ The MySQL reference "
-"implementation from Codership Oy;"
-msgstr ""
-
-#: ../controller-ha-galera.rst:17
-msgid ""
-"`MariaDB Galera Cluster `_ The MariaDB implementation "
-"of Galera Cluster, which is commonly supported in environments based on Red "
-"Hat distributions;"
-msgstr ""
-
-#: ../controller-ha-galera.rst:20
-msgid ""
-"`Percona XtraDB Cluster `_ The XtraDB "
-"implementation of Galera Cluster from Percona."
-msgstr ""
-
-#: ../controller-ha-galera.rst:23
-msgid ""
-"In addition to Galera Cluster, you can also achieve high availability "
-"through other database options, such as PostgreSQL, which has its own "
-"replication system."
-msgstr ""
-
-#: ../controller-ha-haproxy.rst:3
-msgid "HAProxy"
-msgstr ""
-
-#: ../controller-ha-haproxy.rst:5
-msgid ""
-"HAProxy provides a fast and reliable HTTP reverse proxy and load balancer "
-"for TCP or HTTP applications. It is particularly suited to web sites with "
-"very high traffic loads that require persistence or Layer 7 processing. It "
-"realistically supports tens of thousands of connections with recent "
-"hardware."
-msgstr ""
-
-#: ../controller-ha-haproxy.rst:11
-msgid ""
-"Each instance of HAProxy configures its front end to accept connections only "
-"on the virtual IP (VIP) address and its back end as a list of all the "
-"instances of the corresponding service under load balancing, such as any "
-"OpenStack API service."
-msgstr ""
-
-#: ../controller-ha-haproxy.rst:16
-msgid ""
-"This makes the instances of HAProxy act independently and fail over "
-"transparently together with the network endpoints (VIP addresses), and they "
-"therefore share the same SLA."
-msgstr ""
-
-#: ../controller-ha-haproxy.rst:20
-msgid ""
-"You can alternatively use a commercial load balancer, which is available as "
-"hardware or software. A hardware load balancer generally has good "
-"performance."
-msgstr ""
-
-#: ../controller-ha-haproxy.rst:23
-msgid ""
-"For detailed instructions about installing HAProxy on your nodes, see its "
-"`official documentation `_."
-msgstr ""
-
-#: ../controller-ha-haproxy.rst:28
-msgid ""
-"HAProxy should not be a single point of failure. It is advisable to have "
-"multiple HAProxy instances running, where the number of these instances is a "
-"small odd number such as 3 or 5. You need to ensure its availability by "
-"other means, such as Keepalived or Pacemaker."
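To make the ``backup`` directive discussed below concrete, a Galera ``listen`` block in :file:`haproxy.cfg` might look like the following sketch; the IP addresses and the ``clustercheck`` port 9200 are assumptions based on this guide's example addressing:

.. code-block:: none

   listen galera_cluster
     bind 10.0.0.11:3306
     balance source
     option httpchk
     server controller1 10.0.0.12:3306 check port 9200 inter 2000 rise 2 fall 5
     server controller2 10.0.0.13:3306 backup check port 9200 inter 2000 rise 2 fall 5
     server controller3 10.0.0.14:3306 backup check port 9200 inter 2000 rise 2 fall 5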
-msgstr "" - -#: ../controller-ha-haproxy.rst:34 -msgid "" -"The common practice is to locate an HAProxy instance on each OpenStack " -"controller in the environment." -msgstr "" - -#: ../controller-ha-haproxy.rst:37 -msgid "" -"Once configured (see example file below), add HAProxy to the cluster and " -"ensure the VIPs can only run on machines where HAProxy is active:" -msgstr "" - -# #-#-#-#-# controller-ha-haproxy.pot (High Availability Guide 0.0.1) #-#-#-#-# -# #-#-#-#-# controller-ha-pacemaker.pot (High Availability Guide 0.0.1) #-#-#-#-# -#: ../controller-ha-haproxy.rst:40 ../controller-ha-pacemaker.rst:574 -msgid "``pcs``" -msgstr "" - -# #-#-#-#-# controller-ha-haproxy.pot (High Availability Guide 0.0.1) #-#-#-#-# -# #-#-#-#-# controller-ha-pacemaker.pot (High Availability Guide 0.0.1) #-#-#-#-# -#: ../controller-ha-haproxy.rst:48 ../controller-ha-pacemaker.rst:565 -msgid "``crmsh``" -msgstr "" - -#: ../controller-ha-haproxy.rst:50 -msgid "TBA" -msgstr "" - -#: ../controller-ha-haproxy.rst:53 -msgid "Example Config File" -msgstr "" - -#: ../controller-ha-haproxy.rst:55 -msgid "" -"Here is an example ``/etc/haproxy/haproxy.cfg`` configuration file. You need " -"a copy of it on each controller node." -msgstr "" - -#: ../controller-ha-haproxy.rst:60 -msgid "" -"To implement any changes made to this you must restart the HAProxy service" -msgstr "" - -#: ../controller-ha-haproxy.rst:218 -msgid "" -"The Galera cluster configuration directive ``backup`` indicates that two of " -"the three controllers are standby nodes. This ensures that only one node " -"services write requests because OpenStack support for multi-node writes is " -"not yet production-ready." -msgstr "" - -#: ../controller-ha-haproxy.rst:225 -msgid "" -"The Telemetry API service configuration does not have the ``option httpchk`` " -"directive as it cannot process this check properly. TODO: explain why the " -"Telemetry API is so special" -msgstr "" - -#: ../controller-ha-haproxy.rst:229 -msgid "" -"[TODO: we need more commentary about the contents and format of this file]" -msgstr "" - -#: ../controller-ha-keystone.rst:4 -msgid "Identity services (keystone)" -msgstr "" - -#: ../controller-ha-keystone.rst:6 -msgid "" -"OpenStack Identity (keystone) is the Identity service in OpenStack that is " -"used by many services. You should be familiar with `OpenStack identity " -"concepts `_ before proceeding." -msgstr "" - -#: ../controller-ha-keystone.rst:13 -msgid "" -"Making the OpenStack Identity service highly available in active / passive " -"mode involves:" -msgstr "" - -#: ../controller-ha-keystone.rst:16 -msgid ":ref:`keystone-pacemaker`" -msgstr "" - -#: ../controller-ha-keystone.rst:17 -msgid ":ref:`keystone-config-identity`" -msgstr "" - -#: ../controller-ha-keystone.rst:18 -msgid ":ref:`keystone-services-config`" -msgstr "" - -#: ../controller-ha-keystone.rst:23 -msgid "Add OpenStack Identity resource to Pacemaker" -msgstr "" - -#: ../controller-ha-keystone.rst:25 -msgid "" -"You must first download the OpenStack Identity resource to Pacemaker by " -"running the following commands:" -msgstr "" - -#: ../controller-ha-keystone.rst:36 -msgid "" -"You can now add the Pacemaker configuration for the OpenStack Identity " -"resource by running the :command:`crm configure` command to connect to the " -"Pacemaker cluster. Add the following cluster resources:" -msgstr "" - -#: ../controller-ha-keystone.rst:52 -msgid "" -"This configuration creates ``p_keystone``, a resource for managing the " -"OpenStack Identity service." 
-msgstr "" - -#: ../controller-ha-keystone.rst:55 -msgid "" -":command:`crm configure` supports batch input so you may copy and paste the " -"above lines into your live Pacemaker configuration, and then make changes as " -"required. For example, you may enter edit ``p_ip_keystone`` from the :" -"command:`crm configure` menu and edit the resource to match your preferred " -"virtual IP address." -msgstr "" - -#: ../controller-ha-keystone.rst:63 -msgid "" -"After you add these resources, commit your configuration changes by " -"entering :command:`commit` from the :command:`crm configure` menu. Pacemaker " -"then starts the OpenStack Identity service and its dependent resources on " -"one of your nodes." -msgstr "" - -#: ../controller-ha-keystone.rst:72 -msgid "Configure OpenStack Identity service" -msgstr "" - -#: ../controller-ha-keystone.rst:74 -msgid "" -"Edit the :file:`keystone.conf` file to change the values of the :manpage:" -"`bind(2)` parameters:" -msgstr "" - -#: ../controller-ha-keystone.rst:83 -msgid "" -"The ``admin_bind_host`` parameter lets you use a private network for admin " -"access." -msgstr "" - -#: ../controller-ha-keystone.rst:86 -msgid "" -"To be sure that all data is highly available, ensure that everything is " -"stored in the MySQL database (which is also highly available):" -msgstr "" - -#: ../controller-ha-keystone.rst:103 -msgid "" -"Configure OpenStack services to use the highly available OpenStack Identity" -msgstr "" - -#: ../controller-ha-keystone.rst:105 -msgid "" -"Your OpenStack services must now point their OpenStack Identity " -"configuration to the highly available virtual cluster IP address rather than " -"point to the physical IP address of an OpenStack Identity server as you " -"would do in a non-HA environment." -msgstr "" - -#: ../controller-ha-keystone.rst:112 -msgid "" -"For OpenStack Compute, for example, if your OpenStack Identiy service IP " -"address is 10.0.0.11, use the following configuration in your :file:`api-" -"paste.ini` file:" -msgstr "" - -#: ../controller-ha-keystone.rst:120 -msgid "" -"You also need to create the OpenStack Identity Endpoint with this IP address." -msgstr "" - -#: ../controller-ha-keystone.rst:125 -msgid "" -"If you are using both private and public IP addresses, you should create two " -"virtual IP addresses and define your endpoint like this:" -msgstr "" - -#: ../controller-ha-keystone.rst:139 -msgid "" -"If you are using the horizon dashboard, edit the :file:`local_settings.py` " -"file to include the following:" -msgstr "" - -# #-#-#-#-# controller-ha-memcached.pot (High Availability Guide 0.0.1) #-#-#-#-# -# #-#-#-#-# intro-ha-arch-pacemaker.pot (High Availability Guide 0.0.1) #-#-#-#-# -#: ../controller-ha-memcached.rst:3 ../intro-ha-arch-pacemaker.rst:179 -msgid "Memcached" -msgstr "" - -#: ../controller-ha-memcached.rst:5 -msgid "" -"Memcached is a general-purpose distributed memory caching system. It is used " -"to speed up dynamic database-driven websites by caching data and objects in " -"RAM to reduce the number of times an external data source must be read." -msgstr "" - -#: ../controller-ha-memcached.rst:10 -msgid "" -"Memcached is a memory cache demon that can be used by most OpenStack " -"services to store ephemeral data, such as tokens." -msgstr "" - -#: ../controller-ha-memcached.rst:13 -msgid "" -"Access to memcached is not handled by HAproxy because replicated access is " -"currently only in an experimental state. 
Instead, OpenStack services must be "
-"supplied with the full list of hosts running memcached."
-msgstr ""
-
-#: ../controller-ha-memcached.rst:18
-msgid ""
-"The Memcached client implements hashing to balance objects among the "
-"instances. Failure of an instance only impacts a percentage of the objects, "
-"and the client automatically removes it from the list of instances. The SLA "
-"is several minutes."
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:3
-msgid "Pacemaker cluster stack"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:5
-msgid ""
-"The `Pacemaker `_ cluster stack is the state-of-the-"
-"art high availability and load balancing stack for the Linux platform. "
-"Pacemaker is useful for making OpenStack infrastructure highly available. "
-"It is also storage- and application-agnostic, and in no way specific to "
-"OpenStack."
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:11
-msgid ""
-"Pacemaker relies on the `Corosync `_ "
-"messaging layer for reliable cluster communications. Corosync implements the "
-"Totem single-ring ordering and membership protocol. It also provides UDP and "
-"InfiniBand based messaging, quorum, and cluster membership to Pacemaker."
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:18
-msgid ""
-"Pacemaker does not inherently (need or want to) understand the applications "
-"it manages. Instead, it relies on resource agents (RAs), scripts that "
-"encapsulate the knowledge of how to start, stop, and check the health of "
-"each application managed by the cluster."
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:23
-msgid ""
-"These agents must conform to one of the `OCF `_, `SysV Init "
-"`_, Upstart, or Systemd standards."
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:28
-msgid ""
-"Pacemaker ships with a large set of OCF agents (such as those managing MySQL "
-"databases, virtual IP addresses, and RabbitMQ), but can also use any agents "
-"already installed on your system and can be extended with your own (see the "
-"`developer guide `_)."
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:34
-msgid "The steps to implement the Pacemaker cluster stack are:"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:36
-msgid ":ref:`pacemaker-install`"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:37
-msgid ":ref:`pacemaker-corosync-setup`"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:38
-msgid ":ref:`pacemaker-corosync-start`"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:39
-msgid ":ref:`pacemaker-start`"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:40
-msgid ":ref:`pacemaker-cluster-properties`"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:45
-msgid "Install packages"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:47
-msgid ""
-"On any host that is meant to be part of a Pacemaker cluster, you must first "
-"establish cluster communications through the Corosync messaging layer. 
This " -"involves installing the following packages (and their dependencies, which " -"your package manager usually installs automatically):" -msgstr "" - -#: ../controller-ha-pacemaker.rst:54 -msgid "pacemaker" -msgstr "" - -#: ../controller-ha-pacemaker.rst:56 -msgid "pcs (CentOS or RHEL) or crmsh" -msgstr "" - -#: ../controller-ha-pacemaker.rst:58 -msgid "corosync" -msgstr "" - -#: ../controller-ha-pacemaker.rst:60 -msgid "fence-agents (CentOS or RHEL) or cluster-glue" -msgstr "" - -#: ../controller-ha-pacemaker.rst:62 -msgid "resource-agents" -msgstr "" - -#: ../controller-ha-pacemaker.rst:64 -msgid "libqb0" -msgstr "" - -#: ../controller-ha-pacemaker.rst:69 -msgid "Set up the cluster with `pcs`" -msgstr "" - -#: ../controller-ha-pacemaker.rst:71 -msgid "Make sure pcs is running and configured to start at boot time:" -msgstr "" - -#: ../controller-ha-pacemaker.rst:78 -msgid "Set a password for hacluster user **on each host**." -msgstr "" - -#: ../controller-ha-pacemaker.rst:80 -msgid "" -"Since the cluster is a single administrative domain, it is generally " -"accepted to use the same password on all nodes." -msgstr "" - -#: ../controller-ha-pacemaker.rst:88 -msgid "" -"Use that password to authenticate to the nodes which will make up the " -"cluster. The :option:`-p` option is used to give the password on command " -"line and makes it easier to script." -msgstr "" - -#: ../controller-ha-pacemaker.rst:97 -msgid "Create the cluster, giving it a name, and start it:" -msgstr "" - -#: ../controller-ha-pacemaker.rst:107 -msgid "" -"In Red Hat Enterprise Linux or CentOS environments, this is a recommended " -"path to perform configuration. For more information, see the `RHEL docs " -"`_." -msgstr "" - -#: ../controller-ha-pacemaker.rst:112 -msgid "Set up the cluster with `crmsh`" -msgstr "" - -#: ../controller-ha-pacemaker.rst:114 -msgid "" -"After installing the Corosync package, you must create the :file:`/etc/" -"corosync/corosync.conf` configuration file." -msgstr "" - -#: ../controller-ha-pacemaker.rst:118 -msgid "" -"For Ubuntu, you should also enable the Corosync service in the ``/etc/" -"default/corosync`` configuration file." -msgstr "" - -#: ../controller-ha-pacemaker.rst:121 -msgid "" -"Corosync can be configured to work with either multicast or unicast IP " -"addresses or to use the votequorum library." -msgstr "" - -#: ../controller-ha-pacemaker.rst:125 -msgid ":ref:`corosync-multicast`" -msgstr "" - -#: ../controller-ha-pacemaker.rst:126 -msgid ":ref:`corosync-unicast`" -msgstr "" - -#: ../controller-ha-pacemaker.rst:127 -msgid ":ref:`corosync-votequorum`" -msgstr "" - -#: ../controller-ha-pacemaker.rst:132 -msgid "Set up Corosync with multicast" -msgstr "" - -#: ../controller-ha-pacemaker.rst:134 -msgid "" -"Most distributions ship an example configuration file (:file:`corosync.conf." -"example`) as part of the documentation bundled with the Corosync package. An " -"example Corosync configuration file is shown below:" -msgstr "" - -#: ../controller-ha-pacemaker.rst:139 -msgid "**Example Corosync configuration file for multicast (corosync.conf)**" -msgstr "" - -#: ../controller-ha-pacemaker.rst:210 ../controller-ha-pacemaker.rst:342 -#: ../controller-ha-pacemaker.rst:426 ../controller-ha-pacemaker.rst:583 -msgid "Note the following:" -msgstr "" - -#: ../controller-ha-pacemaker.rst:212 -msgid "" -"The ``token`` value specifies the time, in milliseconds, during which the " -"Corosync token is expected to be transmitted around the ring. 
When this " -"timeout expires, the token is declared lost, and after " -"``token_retransmits_before_loss_const lost`` tokens, the non-responding " -"processor (cluster node) is declared dead. In other words, ``token × " -"token_retransmits_before_loss_const`` is the maximum time a node is allowed " -"to not respond to cluster messages before being considered dead. The default " -"for token is 1000 milliseconds (1 second), with 4 allowed retransmits. These " -"defaults are intended to minimize failover times, but can cause frequent " -"\"false alarms\" and unintended failovers in case of short network " -"interruptions. The values used here are safer, albeit with slightly extended " -"failover times." -msgstr "" - -#: ../controller-ha-pacemaker.rst:228 -msgid "" -"With ``secauth`` enabled, Corosync nodes mutually authenticate using a 128-" -"byte shared secret stored in the :file:`/etc/corosync/authkey` file, which " -"may be generated with the :command:`corosync-keygen` utility. When using " -"``secauth``, cluster communications are also encrypted." -msgstr "" - -#: ../controller-ha-pacemaker.rst:234 -msgid "" -"In Corosync configurations using redundant networking (with more than one " -"interface), you must select a Redundant Ring Protocol (RRP) mode other than " -"none. ``active`` is the recommended RRP mode." -msgstr "" - -#: ../controller-ha-pacemaker.rst:239 -msgid "Note the following about the recommended interface configuration:" -msgstr "" - -#: ../controller-ha-pacemaker.rst:241 -msgid "" -"Each configured interface must have a unique ``ringnumber``, starting with 0." -msgstr "" - -#: ../controller-ha-pacemaker.rst:244 -msgid "" -"The ``bindnetaddr`` is the network address of the interfaces to bind to. The " -"example uses two network addresses of /24 IPv4 subnets." -msgstr "" - -#: ../controller-ha-pacemaker.rst:247 -msgid "" -"Multicast groups (``mcastaddr``) must not be reused across cluster " -"boundaries. In other words, no two distinct clusters should ever use the " -"same multicast group. Be sure to select multicast addresses compliant with " -"`RFC 2365, \"Administratively Scoped IP Multicast\" `_." -msgstr "" - -#: ../controller-ha-pacemaker.rst:255 -msgid "" -"For firewall configurations, note that Corosync communicates over UDP only, " -"and uses ``mcastport`` (for receives) and ``mcastport - 1`` (for sends)." -msgstr "" - -#: ../controller-ha-pacemaker.rst:260 -msgid "" -"The service declaration for the pacemaker service may be placed in the :file:" -"`corosync.conf` file directly or in its own separate file, :file:`/etc/" -"corosync/service.d/pacemaker`." -msgstr "" - -#: ../controller-ha-pacemaker.rst:266 -msgid "" -"If you are using Corosync version 2 on Ubuntu 14.04, remove or comment out " -"lines under the service stanza, which enables Pacemaker to start up. Another " -"potential problem is the boot and shutdown order of Corosync and Pacemaker. " -"To force Pacemaker to start after Corosync and stop before Corosync, fix the " -"start and kill symlinks manually:" -msgstr "" - -#: ../controller-ha-pacemaker.rst:277 -msgid "" -"The Pacemaker service also requires an additional configuration file ``/etc/" -"corosync/uidgid.d/pacemaker`` to be created with the following content:" -msgstr "" - -#: ../controller-ha-pacemaker.rst:288 -msgid "" -"Once created, the :file:`corosync.conf` file (and the :file:`authkey` file " -"if the secauth option is enabled) must be synchronized across all cluster " -"nodes." 
-msgstr "" - -#: ../controller-ha-pacemaker.rst:295 -msgid "Set up Corosync with unicast" -msgstr "" - -#: ../controller-ha-pacemaker.rst:297 -msgid "" -"For environments that do not support multicast, Corosync should be " -"configured for unicast. An example fragment of the :file:`corosync.conf` " -"file for unicastis shown below:" -msgstr "" - -#: ../controller-ha-pacemaker.rst:302 -msgid "**Corosync configuration file fragment for unicast (corosync.conf)**" -msgstr "" - -#: ../controller-ha-pacemaker.rst:344 -msgid "" -"If the ``broadcast`` parameter is set to yes, the broadcast address is used " -"for communication. If this option is set, the ``mcastaddr`` parameter should " -"not be set." -msgstr "" - -#: ../controller-ha-pacemaker.rst:348 -msgid "" -"The ``transport`` directive controls the transport mechanism used. To avoid " -"the use of multicast entirely, specify the ``udpu`` unicast transport " -"parameter. This requires specifying the list of members in the ``nodelist`` " -"directive; this could potentially make up the membership before deployment. " -"The default is ``udp``. The transport type can also be set to ``udpu`` or " -"``iba``." -msgstr "" - -#: ../controller-ha-pacemaker.rst:357 -msgid "" -"Within the ``nodelist`` directive, it is possible to specify specific " -"information about the nodes in the cluster. The directive can contain only " -"the node sub-directive, which specifies every node that should be a member " -"of the membership, and where non-default options are needed. Every node must " -"have at least the ``ring0_addr`` field filled." -msgstr "" - -#: ../controller-ha-pacemaker.rst:367 -msgid "" -"For UDPU, every node that should be a member of the membership must be " -"specified." -msgstr "" - -#: ../controller-ha-pacemaker.rst:370 -msgid "Possible options are:" -msgstr "" - -#: ../controller-ha-pacemaker.rst:372 -msgid "" -"``ring{X}_addr`` specifies the IP address of one of the nodes. {X} is the " -"ring number." -msgstr "" - -#: ../controller-ha-pacemaker.rst:375 -msgid "" -"``nodeid`` is optional when using IPv4 and required when using IPv6. This is " -"a 32-bit value specifying the node identifier delivered to the cluster " -"membership service. If this is not specified with IPv4, the node id is " -"determined from the 32-bit IP address of the system to which the system is " -"bound with ring identifier of 0. The node identifier value of zero is " -"reserved and should not be used." -msgstr "" - -#: ../controller-ha-pacemaker.rst:388 -msgid "Set up Corosync with votequorum library" -msgstr "" - -#: ../controller-ha-pacemaker.rst:390 -msgid "" -"The votequorum library is part of the corosync project. It provides an " -"interface to the vote-based quorum service and it must be explicitly enabled " -"in the Corosync configuration file. 
The main role of the votequorum "
-"library is to avoid split-brain situations, but it also provides a mechanism "
-"to:"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:396
-msgid "Query the quorum status"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:398
-msgid "Get a list of nodes known to the quorum service"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:400
-msgid "Receive notifications of quorum state changes"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:402
-msgid "Change the number of votes assigned to a node"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:404
-msgid "Change the number of expected votes for a cluster to be quorate"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:406
-msgid ""
-"Connect an additional quorum device to allow small clusters to remain "
-"quorate during node outages"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:409
-msgid ""
-"The votequorum library has been created to replace and eliminate qdisk, the "
-"disk-based quorum daemon for CMAN, from advanced cluster configurations."
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:413
-msgid ""
-"A sample votequorum service configuration in the :file:`corosync.conf` file "
-"is:"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:428
-msgid ""
-"Specifying ``corosync_votequorum`` enables the votequorum library; this is "
-"the only required option."
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:431
-msgid ""
-"With ``expected_votes`` set to 7, the cluster is fully operational with 7 "
-"nodes (each node has 1 vote) and a quorum of 4. If a list of nodes is "
-"specified as ``nodelist``, the ``expected_votes`` value is ignored."
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:436
-msgid ""
-"Setting ``wait_for_all`` to 1 means that, when starting up a cluster (all "
-"nodes down), the cluster quorum is held until all nodes are online and have "
-"joined the cluster for the first time. This parameter is new in Corosync 2.0."
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:442
-msgid ""
-"Setting ``last_man_standing`` to 1 enables the Last Man Standing (LMS) "
-"feature; by default, it is disabled (set to 0). If a cluster is on the "
-"quorum edge (``expected_votes:`` set to 7; ``online nodes:`` set to 4) for "
-"longer than the time specified for the ``last_man_standing_window`` "
-"parameter, the cluster can recalculate quorum and continue operating even if "
-"the next node is lost. This logic is repeated until the number of online "
-"nodes in the cluster reaches 2. In order to allow the cluster to step down "
-"from 2 members to only 1, the ``auto_tie_breaker`` parameter needs to be "
-"set; this is not recommended for production environments."
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:457
-msgid ""
-"``last_man_standing_window`` specifies the time, in milliseconds, required "
-"to recalculate quorum after one or more hosts have been lost from the "
-"cluster. To do the new quorum recalculation, the cluster must have quorum "
-"for at least the interval specified for ``last_man_standing_window``; the "
-"default is 10000 ms."
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:469
-msgid "Start Corosync"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:471
-msgid ""
-"Corosync is started as a regular system service. Depending on your "
-"distribution, it may ship with an LSB init script, an upstart job, or a "
-"systemd unit file. 
Either way, the service is usually named corosync:"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:476
-msgid ":command:`# /etc/init.d/corosync start` (LSB)"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:477
-msgid ":command:`# service corosync start` (LSB, alternate)"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:478
-msgid ":command:`# start corosync` (upstart)"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:479
-msgid ":command:`# systemctl start corosync` (systemd)"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:481
-msgid "You can now check the Corosync connectivity with two tools."
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:483
-msgid ""
-"Use the :command:`corosync-cfgtool` utility with the :option:`-s` option to "
-"get a summary of the health of the communication rings:"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:498
-msgid ""
-"Use the :command:`corosync-objctl` utility to dump the Corosync cluster "
-"member list:"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:511
-msgid ""
-"You should see a ``status=joined`` entry for each of your constituent "
-"cluster nodes."
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:514
-msgid ""
-"[TODO: Should the main example now use corosync-cmapctl and have the note "
-"give the command for Corosync version 1?]"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:519
-msgid ""
-"If you are using Corosync version 2, use the :command:`corosync-cmapctl` "
-"utility instead of :command:`corosync-objctl`; it is a direct replacement."
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:525
-msgid "Start Pacemaker"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:527
-msgid ""
-"After the Corosync services have been started and you have verified that the "
-"cluster is communicating properly, you can start :command:`pacemakerd`, the "
-"Pacemaker master control process:"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:531
-msgid ":command:`# /etc/init.d/pacemaker start` (LSB)"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:533
-msgid ":command:`# service pacemaker start` (LSB, alternate)"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:535
-msgid ":command:`# start pacemaker` (upstart)"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:537
-msgid ":command:`# systemctl start pacemaker` (systemd)"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:539
-msgid ""
-"After the Pacemaker services have started, Pacemaker creates a default empty "
-"cluster configuration with no resources. Use the :command:`crm_mon` utility "
-"to observe the status of Pacemaker:"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:560
-msgid "Set basic cluster properties"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:562
-msgid ""
-"After you set up your Pacemaker cluster, you should set a few basic cluster "
-"properties:"
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:585
-msgid ""
-"Setting the ``pe-warn-series-max``, ``pe-input-series-max``, and ``pe-error-"
-"series-max`` parameters to 1000 instructs Pacemaker to keep a longer history "
-"of the inputs it has processed and of the errors and warnings its Policy "
-"Engine has generated. This history is useful if you need to troubleshoot the "
-"cluster."
-msgstr ""
-
-#: ../controller-ha-pacemaker.rst:591
-msgid ""
-"Pacemaker uses an event-driven approach to cluster state processing. The "
-"``cluster-recheck-interval`` parameter (which defaults to 15 minutes) "
-"defines the interval at which certain Pacemaker actions occur. It is usually "
-"prudent to reduce this to a shorter interval, such as 5 or 3 minutes."
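With ``crmsh``, setting the properties discussed above might look like the following sketch (the interval value is an example, not a prescription):

.. code-block:: console

   $ crm configure property pe-warn-series-max="1000" \
       pe-input-series-max="1000" pe-error-series-max="1000" \
       cluster-recheck-interval="5min"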
-msgstr "" - -#: ../controller-ha-pacemaker.rst:597 -msgid "After you make these changes, you may commit the updated configuration." -msgstr "" - -#: ../controller-ha-rabbitmq.rst:0 ../controller-ha-rabbitmq.rst:76 -msgid "Install RabbitMQ" -msgstr "" - -# #-#-#-#-# controller-ha-rabbitmq.pot (High Availability Guide 0.0.1) #-#-#-#-# -# #-#-#-#-# intro-ha-arch-pacemaker.pot (High Availability Guide 0.0.1) #-#-#-#-# -#: ../controller-ha-rabbitmq.rst:3 ../intro-ha-arch-pacemaker.rst:178 -msgid "RabbitMQ" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:5 -msgid "" -"An AMQP (Advanced Message Queuing Protocol) compliant message bus is " -"required for most OpenStack components in order to coordinate the execution " -"of jobs entered into the system." -msgstr "" - -#: ../controller-ha-rabbitmq.rst:9 -msgid "" -"The most popular AMQP implementation used in OpenStack installations is " -"RabbitMQ." -msgstr "" - -#: ../controller-ha-rabbitmq.rst:12 -msgid "" -"RabbitMQ nodes fail over both on the application and the infrastructure " -"layers." -msgstr "" - -#: ../controller-ha-rabbitmq.rst:15 -msgid "" -"The application layer is controlled by the ``oslo.messaging`` configuration " -"options for multiple AMQP hosts. If the AMQP node fails, the application " -"reconnects to the next one configured within the specified reconnect " -"interval. The specified reconnect interval constitutes its SLA." -msgstr "" - -#: ../controller-ha-rabbitmq.rst:21 -msgid "" -"On the infrastructure layer, the SLA is the time for which RabbitMQ cluster " -"reassembles. Several cases are possible. The Mnesia keeper node is the " -"master of the corresponding Pacemaker resource for RabbitMQ; when it fails, " -"the result is a full AMQP cluster downtime interval. Normally, its SLA is no " -"more than several minutes. Failure of another node that is a slave of the " -"corresponding Pacemaker resource for RabbitMQ results in no AMQP cluster " -"downtime at all." -msgstr "" - -#: ../controller-ha-rabbitmq.rst:29 -msgid "" -"Making the RabbitMQ service highly available involves the following steps:" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:31 -msgid ":ref:`Install RabbitMQ`" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:33 -msgid ":ref:`Configure RabbitMQ for HA queues`" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:35 -msgid "" -":ref:`Configure OpenStack services to use Rabbit HA queues `" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:40 -msgid "" -"Access to RabbitMQ is not normally handled by HAproxy. Instead, consumers " -"must be supplied with the full list of hosts running RabbitMQ with " -"``rabbit_hosts`` and turn on the ``rabbit_ha_queues`` option." -msgstr "" - -#: ../controller-ha-rabbitmq.rst:45 -msgid "" -"Jon Eck found the `core issue `_ and went into some detail regarding the " -"`history and solution `_ on his blog." -msgstr "" - -#: ../controller-ha-rabbitmq.rst:51 -msgid "In summary though:" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:53 -msgid "" -"The source address for the connection from HAProxy back to the client is the " -"VIP address. However the VIP address is no longer present on the host. This " -"means that the network (IP) layer deems the packet unroutable, and informs " -"the transport (TCP) layer. TCP, however, is a reliable transport. It knows " -"how to handle transient errors and will retry. And so it does." 
-msgstr "" - -#: ../controller-ha-rabbitmq.rst:60 -msgid "In this case that is a problem though, because:" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:62 -msgid "" -"TCP generally holds on to hope for a long time. A ballpark estimate is " -"somewhere on the order of tens of minutes (30 minutes is commonly " -"referenced). During this time it will keep probing and trying to deliver the " -"data." -msgstr "" - -#: ../controller-ha-rabbitmq.rst:67 -msgid "" -"It is important to note that HAProxy has no idea that any of this is " -"happening. As far as its process is concerned, it called ``write()`` with " -"the data and the kernel returned success. The resolution is already " -"understood and just needs to make its way through a review." -msgstr "" - -#: ../controller-ha-rabbitmq.rst:78 -msgid "" -"The commands for installing RabbitMQ are specific to the Linux distribution " -"you are using:" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:85 -msgid "Distribution" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:86 -msgid "Command" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:87 -msgid "Ubuntu, Debian" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:88 -msgid ":command:`# apt-get install rabbitmq-server`" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:89 -msgid "RHEL, Fedora, CentOS" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:90 -msgid ":command:`# yum install rabbitmq-server`" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:91 -msgid "openSUSE" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:92 ../controller-ha-rabbitmq.rst:98 -msgid ":command:`# zypper install rabbitmq-server`" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:93 -msgid "SLES 12" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:94 -msgid ":command:`# zypper addrepo -f obs://Cloud:OpenStack:Kilo/SLE_12 Kilo`" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:96 -msgid "[Verify fingerprint of imported GPG key; see below]" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:103 -msgid "" -"For SLES 12, the packages are signed by GPG key 893A90DAD85F9316. You should " -"verify the fingerprint of the imported GPG key before using it." -msgstr "" - -#: ../controller-ha-rabbitmq.rst:114 -msgid "" -"For more information, see the official installation manual for the " -"distribution:" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:117 -msgid "`Debian and Ubuntu `_" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:118 -msgid "" -"`RPM based `_ (RHEL, Fedora, " -"CentOS, openSUSE)" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:124 -msgid "Configure RabbitMQ for HA queues" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:126 -msgid "" -"[TODO: This section should begin with a brief mention about what HA queues " -"are and why they are valuable, etc]" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:129 -msgid "" -"We are building a cluster of RabbitMQ nodes to construct a RabbitMQ broker, " -"which is a logical grouping of several Erlang nodes." -msgstr "" - -#: ../controller-ha-rabbitmq.rst:132 -msgid "The following components/services can work with HA queues:" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:134 -msgid "[TODO: replace \"currently\" with specific release names]" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:136 -msgid "" -"[TODO: Does this list need to be updated? 
Perhaps we need a table that shows "
-"each component and the earliest release that allows it to work with HA "
-"queues.]"
-msgstr ""
-
-#: ../controller-ha-rabbitmq.rst:140
-msgid "OpenStack Compute"
-msgstr ""
-
-#: ../controller-ha-rabbitmq.rst:141
-msgid "OpenStack Block Storage"
-msgstr ""
-
-#: ../controller-ha-rabbitmq.rst:142
-msgid "OpenStack Networking"
-msgstr ""
-
-# #-#-#-#-# controller-ha-rabbitmq.pot (High Availability Guide 0.0.1) #-#-#-#-#
-# #-#-#-#-# controller-ha-telemetry.pot (High Availability Guide 0.0.1) #-#-#-#-#
-#: ../controller-ha-rabbitmq.rst:143 ../controller-ha-telemetry.rst:4
-msgid "Telemetry"
-msgstr ""
-
-#: ../controller-ha-rabbitmq.rst:145
-msgid ""
-"We have to consider that, while exchanges and bindings survive the loss of "
-"individual nodes, queues and their messages do not, because a queue and its "
-"contents are located on one node. If we lose this node, we also lose the "
-"queue."
-msgstr ""
-
-#: ../controller-ha-rabbitmq.rst:151
-msgid ""
-"Mirrored queues in RabbitMQ improve the availability of the service, since "
-"they are resilient to failures."
-msgstr ""
-
-#: ../controller-ha-rabbitmq.rst:154
-msgid ""
-"Production servers should run (at least) three RabbitMQ servers; for testing "
-"and demonstration purposes, it is possible to run only two servers. In this "
-"section, we configure two nodes, called ``rabbit1`` and ``rabbit2``. To "
-"build a broker, we need to ensure that all nodes have the same Erlang cookie "
-"file."
-msgstr ""
-
-#: ../controller-ha-rabbitmq.rst:162
-msgid "[TODO: Should the example instead use a minimum of three nodes?]"
-msgstr ""
-
-#: ../controller-ha-rabbitmq.rst:164
-msgid ""
-"To do so, stop RabbitMQ everywhere and copy the cookie from the first node "
-"to each of the other node(s):"
-msgstr ""
-
-#: ../controller-ha-rabbitmq.rst:171
-msgid ""
-"On each target node, verify the correct owner, group, and permissions of the "
-"file :file:`erlang.cookie`."
-msgstr ""
-
-#: ../controller-ha-rabbitmq.rst:179
-msgid ""
-"Start the message queue service on all nodes and configure it to start when "
-"the system boots."
-msgstr ""
-
-#: ../controller-ha-rabbitmq.rst:182
-msgid "On Ubuntu, it is configured by default."
-msgstr ""
-
-#: ../controller-ha-rabbitmq.rst:184
-msgid "On CentOS, RHEL, openSUSE, and SLES:"
-msgstr ""
-
-#: ../controller-ha-rabbitmq.rst:191
-msgid "Verify that the nodes are running:"
-msgstr ""
-
-#: ../controller-ha-rabbitmq.rst:202
-msgid "Run the following commands on each node except the first one:"
-msgstr ""
-
-#: ../controller-ha-rabbitmq.rst:216
-msgid ""
-"The default node type is a disc node. In this guide, nodes join the cluster "
-"as RAM nodes."
-msgstr ""
-
-#: ../controller-ha-rabbitmq.rst:219
-msgid "To verify the cluster status:"
-msgstr ""
-
-#: ../controller-ha-rabbitmq.rst:228
-msgid ""
-"If the cluster is working, you can create usernames and passwords for the "
-"queues."
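For example, a quick status check followed by user creation might look like this (the user name ``openstack`` and the ``RABBIT_PASS`` placeholder are examples):

.. code-block:: console

   # rabbitmqctl cluster_status
   # rabbitmqctl add_user openstack RABBIT_PASS
   # rabbitmqctl set_permissions openstack ".*" ".*" ".*"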
-msgstr "" - -#: ../controller-ha-rabbitmq.rst:231 -msgid "" -"To ensure that all queues except those with auto-generated names are " -"mirrored across all running nodes, set the ``ha-mode`` policy key to all by " -"running the following command on one of the nodes:" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:240 -msgid "More information is available in the RabbitMQ documentation:" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:242 -msgid "`Highly Available Queues `_" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:243 -msgid "`Clustering Guide `_" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:247 -msgid "" -"As another option to make RabbitMQ highly available, RabbitMQ contains the " -"OCF scripts for the Pacemaker cluster resource agents since version 3.5.7. " -"It provides the active/active RabbitMQ cluster with mirrored queues. For " -"more information, see `Auto-configuration of a cluster with a Pacemaker " -"`_." -msgstr "" - -#: ../controller-ha-rabbitmq.rst:256 -msgid "Configure OpenStack services to use Rabbit HA queues" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:258 -msgid "" -"We have to configure the OpenStack components to use at least two RabbitMQ " -"nodes." -msgstr "" - -#: ../controller-ha-rabbitmq.rst:261 -msgid "Do this configuration on all services using RabbitMQ:" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:263 -msgid "RabbitMQ HA cluster host:port pairs:" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:269 -msgid "" -"How frequently to retry connecting with RabbitMQ: [TODO: document the unit " -"of measure here? Seconds?]" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:276 -msgid "" -"How long to back-off for between retries when connecting to RabbitMQ: [TODO: " -"document the unit of measure here? Seconds?]" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:283 -msgid "" -"Maximum retries with trying to connect to RabbitMQ (infinite by default):" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:289 -msgid "Use durable queues in RabbitMQ:" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:295 -msgid "Use HA queues in RabbitMQ (x-ha-policy: all):" -msgstr "" - -#: ../controller-ha-rabbitmq.rst:303 -msgid "" -"If you change the configuration from an old set-up that did not use HA " -"queues, you should restart the service:" -msgstr "" - -#: ../controller-ha-telemetry.rst:6 -msgid "[TODO (Add Telemetry overview)]" -msgstr "" - -#: ../controller-ha-telemetry.rst:9 -msgid "Telemetry central agent" -msgstr "" - -#: ../controller-ha-telemetry.rst:11 -msgid "" -"The Telemetry central agent can be configured to partition its polling " -"workload between multiple agents, enabling high availability." -msgstr "" - -#: ../controller-ha-telemetry.rst:14 -msgid "" -"Both the central and the compute agent can run in an HA deployment, which " -"means that multiple instances of these services can run in parallel with " -"workload partitioning among these running instances." -msgstr "" - -#: ../controller-ha-telemetry.rst:18 -msgid "" -"The `Tooz `__ library provides the " -"coordination within the groups of service instances. It provides an API " -"above several back ends that can be used for building distributed " -"applications." -msgstr "" - -#: ../controller-ha-telemetry.rst:23 -msgid "" -"Tooz supports `various drivers `__ including the following back end solutions:" -msgstr "" - -#: ../controller-ha-telemetry.rst:28 ../controller-ha-telemetry.rst:31 -msgid "Recommended solution by the Tooz project." -msgstr "" - -#: ../controller-ha-telemetry.rst:28 -msgid "`Zookeeper `__." 
-msgstr "" - -#: ../controller-ha-telemetry.rst:31 -msgid "`Redis `__." -msgstr "" - -#: ../controller-ha-telemetry.rst:34 -msgid "Recommended for testing." -msgstr "" - -#: ../controller-ha-telemetry.rst:34 -msgid "`Memcached `__." -msgstr "" - -#: ../controller-ha-telemetry.rst:36 -msgid "" -"You must configure a supported Tooz driver for the HA deployment of the " -"Telemetry services." -msgstr "" - -#: ../controller-ha-telemetry.rst:39 -msgid "" -"For information about the required configuration options that have to be set " -"in the :file:`ceilometer.conf` configuration file for both the central and " -"compute agents, see the `coordination section `__ in the OpenStack Configuration Reference." -msgstr "" - -#: ../controller-ha-telemetry.rst:46 -msgid "" -"Without the ``backend_url`` option being set only one instance of both the " -"central and compute agent service is able to run and function correctly." -msgstr "" - -#: ../controller-ha-telemetry.rst:50 -msgid "" -"The availability check of the instances is provided by heartbeat messages. " -"When the connection with an instance is lost, the workload will be " -"reassigned within the remained instances in the next polling cycle." -msgstr "" - -#: ../controller-ha-telemetry.rst:54 -msgid "" -"Memcached uses a timeout value, which should always be set to a value that " -"is higher than the heartbeat value set for Telemetry." -msgstr "" - -#: ../controller-ha-telemetry.rst:57 -msgid "" -"For backward compatibility and supporting existing deployments, the central " -"agent configuration also supports using different configuration files for " -"groups of service instances of this type that are running in parallel. For " -"enabling this configuration, set a value for the partitioning_group_prefix " -"option in the `central section `__ in the " -"OpenStack Configuration Reference." -msgstr "" - -#: ../controller-ha-telemetry.rst:65 -msgid "" -"For each sub-group of the central agent pool with the same " -"``partitioning_group_prefix`` a disjoint subset of meters must be polled -- " -"otherwise samples may be missing or duplicated. The list of meters to poll " -"can be set in the :file:`/etc/ceilometer/pipeline.yaml` configuration file. " -"For more information about pipelines see the `Data collection and processing " -"`__ section." -msgstr "" - -#: ../controller-ha-telemetry.rst:74 -msgid "" -"To enable the compute agent to run multiple instances simultaneously with " -"workload partitioning, the workload_partitioning option has to be set to " -"``True`` under the `compute section `__ in the :" -"file:`ceilometer.conf` configuration file." -msgstr "" - -#: ../controller-ha-vip.rst:4 -msgid "Configure the VIP" -msgstr "" - -#: ../controller-ha-vip.rst:6 -msgid "" -"You must select and assign a virtual IP address (VIP) that can freely float " -"between cluster nodes." -msgstr "" - -#: ../controller-ha-vip.rst:9 -msgid "" -"This configuration creates ``vip``, a virtual IP address for use by the API " -"node (``10.0.0.11``):" -msgstr "" - -#: ../controller-ha-vip.rst:12 -msgid "For ``crmsh``:" -msgstr "" - -#: ../controller-ha-vip.rst:19 -msgid "For ``pcs``:" -msgstr "" - -#: ../controller-ha.rst:4 -msgid "Configuring the controller for high availability" -msgstr "" - -#: ../controller-ha.rst:6 -msgid "" -"The cloud controller runs on the management network and must talk to all " -"other services." 
-msgstr "" - -#: ../hardware-ha-basic.rst:4 -msgid "Hardware setup" -msgstr "" - -#: ../hardware-ha-basic.rst:6 -msgid "The standard hardware requirements:" -msgstr "" - -#: ../hardware-ha-basic.rst:8 -msgid "" -"`Provider networks `_" -msgstr "" - -#: ../hardware-ha-basic.rst:9 -msgid "" -"`Self-service networks `_" -msgstr "" - -#: ../hardware-ha-basic.rst:11 -msgid "" -"However, OpenStack does not require a significant amount of resources and " -"the following minimum requirements should support a proof-of-concept high " -"availability environment with core services and several instances:" -msgstr "" - -#: ../hardware-ha-basic.rst:16 -msgid "[TODO: Verify that these numbers are good]" -msgstr "" - -#: ../hardware-ha-basic.rst:19 -msgid "Memory" -msgstr "" - -#: ../hardware-ha-basic.rst:19 -msgid "NIC" -msgstr "" - -#: ../hardware-ha-basic.rst:19 -msgid "Node type" -msgstr "" - -#: ../hardware-ha-basic.rst:19 -msgid "Processor" -msgstr "" - -#: ../hardware-ha-basic.rst:19 -msgid "Storage" -msgstr "" - -#: ../hardware-ha-basic.rst:21 -msgid "1-2" -msgstr "" - -#: ../hardware-ha-basic.rst:21 -msgid "100 GB" -msgstr "" - -#: ../hardware-ha-basic.rst:21 ../hardware-ha-basic.rst:23 -msgid "2" -msgstr "" - -#: ../hardware-ha-basic.rst:21 -msgid "8 GB" -msgstr "" - -#: ../hardware-ha-basic.rst:21 -msgid "controller node" -msgstr "" - -#: ../hardware-ha-basic.rst:23 -msgid "100+ GB" -msgstr "" - -#: ../hardware-ha-basic.rst:23 -msgid "2-4+" -msgstr "" - -#: ../hardware-ha-basic.rst:23 -msgid "8+ GB" -msgstr "" - -#: ../hardware-ha-basic.rst:23 -msgid "compute node" -msgstr "" - -#: ../hardware-ha-basic.rst:27 -msgid "" -"For demonstrations and studying, you can set up a test environment on " -"virtual machines (VMs). This has the following benefits:" -msgstr "" - -#: ../hardware-ha-basic.rst:31 -msgid "" -"One physical server can support multiple nodes, each of which supports " -"almost any number of network interfaces." -msgstr "" - -#: ../hardware-ha-basic.rst:34 -msgid "" -"Ability to take periodic \"snap shots\" throughout the installation process " -"and \"roll back\" to a working configuration in the event of a problem." -msgstr "" - -#: ../hardware-ha-basic.rst:37 -msgid "" -"However, running an OpenStack environment on VMs degrades the performance of " -"your instances, particularly if your hypervisor and/or processor lacks " -"support for hardware acceleration of nested VMs." -msgstr "" - -#: ../hardware-ha-basic.rst:44 -msgid "" -"When installing highly-available OpenStack on VMs, be sure that your " -"hypervisor permits promiscuous mode and disables MAC address filtering on " -"the external network." -msgstr "" - -#: ../hardware-ha.rst:4 -msgid "Hardware considerations for high availability" -msgstr "" - -#: ../hardware-ha.rst:6 -msgid "" -"[TODO: Provide a minimal architecture example for HA, expanded on that given " -"in http://docs.openstack.org/liberty/install-guide-ubuntu/environment.html " -"for easy comparison]" -msgstr "" - -#: ../index.rst:3 -msgid "OpenStack High Availability Guide" -msgstr "" - -#: ../index.rst:6 -msgid "Abstract" -msgstr "" - -#: ../index.rst:8 -msgid "" -"This guide describes how to install and configure OpenStack for high " -"availability. It supplements the OpenStack Installation Guides and assumes " -"that you are familiar with the material in those guides." -msgstr "" - -#: ../index.rst:13 -msgid "" -"This guide documents OpenStack Liberty, OpenStack Kilo, and OpenStack Juno " -"releases." 
-msgstr "" - -#: ../index.rst:16 -msgid "" -"This guide is a work-in-progress and changing rapidly while we continue to " -"test and enhance the guidance. Please note where there are open \"to do\" " -"items and help where you are able." -msgstr "" - -#: ../index.rst:21 -msgid "Contents" -msgstr "" - -#: ../index.rst:41 -msgid "Search in this guide" -msgstr "" - -#: ../index.rst:43 -msgid ":ref:`search`" -msgstr "" - -#: ../install-ha-memcached.rst:4 -msgid "Install memcached" -msgstr "" - -#: ../install-ha-memcached.rst:6 -msgid "" -"[TODO: Verify that Oslo supports hash synchronization; if so, this should " -"not take more than load balancing.]" -msgstr "" - -#: ../install-ha-memcached.rst:9 -msgid "" -"[TODO: This hands off to two different docs for install information. We " -"should choose one or explain the specific purpose of each.]" -msgstr "" - -#: ../install-ha-memcached.rst:12 -msgid "" -"Most OpenStack services can use memcached to store ephemeral data such as " -"tokens. Although memcached does not support typical forms of redundancy such " -"as clustering, OpenStack services can use almost any number of instances by " -"configuring multiple hostnames or IP addresses. The memcached client " -"implements hashing to balance objects among the instances. Failure of an " -"instance only impacts a percentage of the objects and the client " -"automatically removes it from the list of instances." -msgstr "" - -#: ../install-ha-memcached.rst:23 -msgid "" -"To install and configure memcached, read the `official documentation " -"`_." -msgstr "" - -#: ../install-ha-memcached.rst:26 -msgid "" -"Memory caching is managed by `oslo.cache `_ so the way " -"to use multiple memcached servers is the same for all projects." -msgstr "" - -#: ../install-ha-memcached.rst:30 -msgid "[TODO: Should this show three hosts?]" -msgstr "" - -#: ../install-ha-memcached.rst:32 -msgid "Example configuration with two hosts:" -msgstr "" - -#: ../install-ha-memcached.rst:38 -msgid "" -"By default, `controller1` handles the caching service but, if the host goes " -"down, `controller2` does the job. For more information about memcached " -"installation, see the `OpenStack Cloud Administrator Guide `_." -msgstr "" - -#: ../install-ha-ntp.rst:3 -msgid "Configure NTP" -msgstr "" - -#: ../install-ha-ntp.rst:5 -msgid "" -"You must configure NTP to properly synchronize services among nodes. We " -"recommend that you configure the controller node to reference more accurate " -"(lower stratum) servers and other nodes to reference the controller node. " -"For more information, see the `Install Guides `_." -msgstr "" - -#: ../install-ha-os.rst:3 -msgid "Install operating system on each node" -msgstr "" - -#: ../install-ha-os.rst:5 -msgid "" -"The first step in setting up your highly-available OpenStack cluster is to " -"install the operating system on each node. Follow the instructions in the " -"OpenStack Installation Guides:" -msgstr "" - -#: ../install-ha-os.rst:9 -msgid "" -"`CentOS and RHEL `_" -msgstr "" - -#: ../install-ha-os.rst:10 -msgid "" -"`openSUSE and SUSE Linux Enterprise Server `_" -msgstr "" - -#: ../install-ha-os.rst:11 -msgid "" -"`Ubuntu `_" -msgstr "" - -#: ../install-ha-os.rst:13 -msgid "" -"The OpenStack Installation Guides also include a list of the services that " -"use passwords with important notes about using them." 
-msgstr "" - -#: ../install-ha-os.rst:16 -msgid "This guide uses the following example IP addresses:" -msgstr "" - -#: ../install-ha.rst:3 -msgid "Installing high availability packages" -msgstr "" - -#: ../install-ha.rst:5 -msgid "[TODO -- write intro to this section]" -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:3 -msgid "The keepalived architecture" -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:6 -msgid "High availability strategies" -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:8 -msgid "" -"The following diagram shows a very simplified view of the different " -"strategies used to achieve high availability for the OpenStack services:" -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:15 -msgid "" -"Depending on the method used to communicate with the service, the following " -"availability strategies will be followed:" -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:18 -msgid "Keepalived, for the HAProxy instances." -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:19 -msgid "" -"Access via an HAProxy virtual IP, for services such as HTTPd that are " -"accessed via a TCP socket that can be load balanced" -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:21 -msgid "" -"Built-in application clustering, when available from the application. Galera " -"is one example of this." -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:23 -msgid "" -"Starting up one instance of the service on several controller nodes, when " -"they can coexist and coordinate by other means. RPC in ``nova-conductor`` is " -"one example of this." -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:26 -msgid "" -"No high availability, when the service can only work in active/passive mode." -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:29 -msgid "" -"There are known issues with cinder-volume that recommend setting it as " -"active-passive for now, see: https://blueprints.launchpad.net/cinder/+spec/" -"cinder-volume-active-active-support" -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:33 -msgid "" -"While there will be multiple neutron LBaaS agents running, each agent will " -"manage a set of load balancers, that cannot be failed over to another node." -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:38 -msgid "Architecture limitations" -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:40 -msgid "" -"This architecture has some inherent limitations that should be kept in mind " -"during deployment and daily operations. The following sections describe " -"these limitations." -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:44 -msgid "Keepalived and network partitions" -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:46 -msgid "" -"In case of a network partitioning, there is a chance that two or more nodes " -"running keepalived claim to hold the same VIP, which may lead to an " -"undesired behaviour. Since keepalived uses VRRP over multicast to elect a " -"master (VIP owner), a network partition in which keepalived nodes cannot " -"communicate will result in the VIPs existing on two nodes. When the network " -"partition is resolved, the duplicate VIPs should also be resolved. Note that " -"this network partition problem with VRRP is a known limitation for this " -"architecture." -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:56 -msgid "Cinder-volume as a single point of failure" -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:58 -msgid "" -"There are currently concerns over the cinder-volume service ability to run " -"as a fully active-active service. 
During the Mitaka timeframe, this is being " -"worked on; see: https://blueprints.launchpad.net/cinder/+spec/cinder-volume-" -"active-active-support. Thus, cinder-volume will only be running on one of the " -"controller nodes, even if it is configured on all nodes. In case of a " -"failure in the node running cinder-volume, it should be started on a " -"surviving controller node." -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:67 -msgid "Neutron-lbaas-agent as a single point of failure" -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:69 -msgid "" -"The current design of the neutron LBaaS agent using the HAProxy driver does " -"not allow high availability for the tenant load balancers. The neutron-lbaas-" -"agent service will be enabled and running on all controllers, allowing for " -"load balancers to be distributed across all nodes. However, a controller " -"node failure will stop all load balancers running on that node until the " -"service is recovered or the load balancer is manually removed and created " -"again." -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:78 -msgid "Service monitoring and recovery required" -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:80 -msgid "" -"An external service monitoring infrastructure is required to check the " -"OpenStack service health, and notify operators in case of any failure. This " -"architecture does not provide any facility for that, so it would be " -"necessary to integrate the OpenStack deployment with any existing monitoring " -"environment." -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:86 -msgid "Manual recovery after a full cluster restart" -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:88 -msgid "" -"Some support services used by RDO or RHEL OSP use their own form of " -"application clustering. Usually, these services maintain a cluster quorum " -"that may be lost in case of a simultaneous restart of all cluster nodes, for " -"example during a power outage. Each service will require its own procedure " -"to regain quorum." -msgstr "" - -#: ../intro-ha-arch-keepalived.rst:94 -msgid "" -"If you find any or all of these limitations concerning, you are encouraged " -"to refer to the :doc:`Pacemaker HA architecture` " -"instead." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:3 -msgid "The Pacemaker architecture" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:6 -msgid "What is a cluster manager" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:8 -msgid "" -"At its core, a cluster is a distributed finite state machine capable of co-" -"ordinating the startup and recovery of inter-related services across a set " -"of machines." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:12 -msgid "" -"Even a distributed and/or replicated application that is able to survive " -"failures on one or more machines can benefit from a cluster manager:" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:16 -msgid "Awareness of other applications in the stack" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:18 -msgid "" -"While SYS-V init replacements like systemd can provide deterministic " -"recovery of a complex stack of services, the recovery is limited to one " -"machine and lacks the context of what is happening on other machines - " -"context that is crucial to determine the difference between a local failure, " -"clean startup and recovery after a total site failure."
-msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:25 -msgid "Awareness of instances on other machines" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:27 -msgid "" -"Services like RabbitMQ and Galera have complicated boot-up sequences that " -"require co-ordination, and often serialization, of startup operations across " -"all machines in the cluster. This is especially true after site-wide failure " -"or shutdown where we must first determine the last machine to be active." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:33 -msgid "" -"A shared implementation and calculation of `quorum `_." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:36 -msgid "" -"It is very important that all members of the system share the same view of " -"who their peers are and whether or not they are in the majority. Failure to " -"do this leads very quickly to an internal `split-brain `_ state - where different parts of the " -"system are pulling in different and incompatible directions." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:43 -msgid "" -"Data integrity through fencing (a non-responsive process does not imply it " -"is not doing anything)" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:46 -msgid "" -"A single application does not have sufficient context to know the difference " -"between failure of a machine and failure of the applcation on a machine. The " -"usual practice is to assume the machine is dead and carry on, however this " -"is highly risky - a rogue process or machine could still be responding to " -"requests and generally causing havoc. The safer approach is to make use of " -"remotely accessible power switches and/or network switches and SAN " -"controllers to fence (isolate) the machine before continuing." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:55 -msgid "Automated recovery of failed instances" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:57 -msgid "" -"While the application can still run after the failure of several instances, " -"it may not have sufficient capacity to serve the required volume of " -"requests. A cluster can automatically recover failed instances to prevent " -"additional load induced failures." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:62 -msgid "" -"For this reason, the use of a cluster manager like `Pacemaker `_ is highly recommended." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:66 -msgid "Deployment flavors" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:68 -msgid "" -"It is possible to deploy three different flavors of the Pacemaker " -"architecture. The two extremes are **Collapsed** (where every component runs " -"on every node) and **Segregated** (where every component runs in its own 3+ " -"node cluster)." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:73 -msgid "" -"Regardless of which flavor you choose, it is recommended that the clusters " -"contain at least three nodes so that we can take advantage of `quorum " -"`_." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:77 -msgid "" -"Quorum becomes important when a failure causes the cluster to split in two " -"or more partitions. In this situation, you want the majority to ensure the " -"minority are truly dead (through fencing) and continue to host resources. " -"For a two-node cluster, no side has the majority and you can end up in a " -"situation where both sides fence each other, or both sides are running the " -"same services - leading to data corruption." 
-msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:84 -msgid "" -"Clusters with an even number of hosts suffer from similar issues - a single " -"network failure could easily cause a N:N split where neither side retains a " -"majority. For this reason, we recommend an odd number of cluster members " -"when scaling up." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:89 -msgid "" -"You can have up to 16 cluster members (this is currently limited by the " -"ability of corosync to scale higher). In extreme cases, 32 and even up to 64 " -"nodes could be possible, however, this is not well tested." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:94 -msgid "Collapsed" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:96 -msgid "" -"In this configuration, there is a single cluster of 3 or more nodes on which " -"every component is running." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:99 -msgid "" -"This scenario has the advantage of requiring far fewer, if more powerful, " -"machines. Additionally, being part of a single cluster allows us to " -"accurately model the ordering dependencies between components." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:104 -msgid "This scenario can be visualized as below." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:109 -msgid "" -"You would choose this option if you prefer to have fewer but more powerful " -"boxes." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:112 -msgid "This is the most common option and the one we document here." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:115 -msgid "Segregated" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:117 -msgid "" -"In this configuration, each service runs in a dedicated cluster of 3 or more " -"nodes." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:120 -msgid "" -"The benefits of this approach are the physical isolation between components " -"and the ability to add capacity to specific components." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:123 -msgid "" -"You would choose this option if you prefer to have more but less powerful " -"boxes." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:126 -msgid "" -"This scenario can be visualized as below, where each box below represents a " -"cluster of three or more guests." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:133 -msgid "Mixed" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:135 -msgid "" -"It is also possible to follow a segregated approach for one or more " -"components that are expected to be a bottleneck and use a collapsed approach " -"for the remainder." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:141 -msgid "Proxy server" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:143 -msgid "" -"Almost all services in this stack benefit from being proxied. Using a proxy " -"server provides:" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:146 -msgid "Load distribution" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:148 -msgid "" -"Many services can act in an active/active capacity, however, they usually " -"require an external mechanism for distributing requests to one of the " -"available instances. The proxy server can serve this role." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:153 -msgid "API isolation" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:155 -msgid "" -"By sending all API access through the proxy, we can clearly identify service " -"interdependencies. We can also move them to locations other than " -"``localhost`` to increase capacity if the need arises." 
-msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:160 -msgid "Simplified process for adding/removing of nodes" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:162 -msgid "" -"Since all API access is directed to the proxy, adding or removing nodes has " -"no impact on the configuration of other services. This can be very useful in " -"upgrade scenarios where an entirely new set of machines can be configured " -"and tested in isolation before telling the proxy to direct traffic there " -"instead." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:168 -msgid "Enhanced failure detection" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:170 -msgid "" -"The proxy can be configured as a secondary mechanism for detecting service " -"failures. It can even be configured to look for nodes in a degraded state " -"(such as being 'too far' behind in the replication) and take them out of " -"circulation." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:175 -msgid "" -"The following components are currently unable to benefit from the use of a " -"proxy server:" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:180 -msgid "MongoDB" -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:182 -msgid "" -"However, the reasons vary and are discussed under each component's heading." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:185 -msgid "" -"We recommend HAProxy as the load balancer, however, there are many " -"alternatives in the marketplace." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:188 -msgid "" -"We use a check interval of 1 second, however, the timeouts vary by service." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:190 -msgid "" -"Generally, we use round-robin to distribute load amongst instances of active/" -"active services, however, Galera uses the ``stick-table`` options to ensure " -"that incoming connections to the virtual IP (VIP) should be directed to only " -"one of the available back ends." -msgstr "" - -#: ../intro-ha-arch-pacemaker.rst:195 -msgid "" -"In Galera's case, although it can run active/active, this helps avoid lock " -"contention and prevent deadlocks. It is used in combination with the " -"``httpchk`` option that ensures only nodes that are in sync with its peers " -"are allowed to handle requests." -msgstr "" - -#: ../intro-ha-compute.rst:4 -msgid "Overview of highly-available compute nodes" -msgstr "" - -#: ../intro-ha-concepts.rst:3 -msgid "High availability concepts" -msgstr "" - -#: ../intro-ha-concepts.rst:5 -msgid "High availability systems seek to minimize two things:" -msgstr "" - -#: ../intro-ha-concepts.rst:8 -msgid "" -"Occurs when a user-facing service is unavailable beyond a specified maximum " -"amount of time." -msgstr "" - -#: ../intro-ha-concepts.rst:9 -msgid "**System downtime**" -msgstr "" - -#: ../intro-ha-concepts.rst:12 -msgid "**Data loss**" -msgstr "" - -#: ../intro-ha-concepts.rst:12 -msgid "Accidental deletion or destruction of data." -msgstr "" - -#: ../intro-ha-concepts.rst:14 -msgid "" -"Most high availability systems guarantee protection against system downtime " -"and data loss only in the event of a single failure. However, they are also " -"expected to protect against cascading failures, where a single failure " -"deteriorates into a series of consequential failures. Many service providers " -"guarantee :term:`Service Level Agreement (SLA)` including uptime percentage " -"of computing service, which is calculated based on the available time and " -"system downtime excluding planned outage time." 
-msgstr "" - -#: ../intro-ha-concepts.rst:23 -msgid "Redundancy and failover" -msgstr "" - -#: ../intro-ha-concepts.rst:25 -msgid "" -"High availability is implemented with redundant hardware running redundant " -"instances of each service. If one piece of hardware running one instance of " -"a service fails, the system can then failover to use another instance of a " -"service that is running on hardware that did not fail." -msgstr "" - -#: ../intro-ha-concepts.rst:31 -msgid "" -"A crucial aspect of high availability is the elimination of single points of " -"failure (SPOFs). A SPOF is an individual piece of equipment or software that " -"causes system downtime or data loss if it fails. In order to eliminate " -"SPOFs, check that mechanisms exist for redundancy of:" -msgstr "" - -#: ../intro-ha-concepts.rst:37 -msgid "Network components, such as switches and routers" -msgstr "" - -#: ../intro-ha-concepts.rst:39 -msgid "Applications and automatic service migration" -msgstr "" - -#: ../intro-ha-concepts.rst:41 -msgid "Storage components" -msgstr "" - -#: ../intro-ha-concepts.rst:43 -msgid "Facility services such as power, air conditioning, and fire protection" -msgstr "" - -#: ../intro-ha-concepts.rst:45 -msgid "" -"In the event that a component fails and a back-up system must take on its " -"load, most high availability systems will replace the failed component as " -"quickly as possible to maintain necessary redundancy. This way time spent in " -"a degraded protection state is minimized." -msgstr "" - -#: ../intro-ha-concepts.rst:50 -msgid "" -"Most high availability systems fail in the event of multiple independent " -"(non-consequential) failures. In this case, most implementations favor " -"protecting data over maintaining availability." -msgstr "" - -#: ../intro-ha-concepts.rst:54 -msgid "" -"High availability systems typically achieve an uptime percentage of 99.99% " -"or more, which roughly equates to less than an hour of cumulative downtime " -"per year. In order to achieve this, high availability systems should keep " -"recovery times after a failure to about one to two minutes, sometimes " -"significantly less." -msgstr "" - -#: ../intro-ha-concepts.rst:60 -msgid "" -"OpenStack currently meets such availability requirements for its own " -"infrastructure services, meaning that an uptime of 99.99% is feasible for " -"the OpenStack infrastructure proper. However, OpenStack does not guarantee " -"99.99% availability for individual guest instances." -msgstr "" - -#: ../intro-ha-concepts.rst:65 -msgid "" -"This document discusses some common methods of implementing highly available " -"systems, with an emphasis on the core OpenStack services and other open " -"source services that are closely aligned with OpenStack. These methods are " -"by no means the only ways to do it; you may supplement these services with " -"commercial hardware and software that provides additional features and " -"functionality. You also need to address high availability concerns for any " -"applications software that you run on your OpenStack environment. The " -"important thing is to make sure that your services are redundant and " -"available; how you achieve that is up to you." -msgstr "" - -#: ../intro-ha-concepts.rst:77 -msgid "Stateless vs. stateful services" -msgstr "" - -#: ../intro-ha-concepts.rst:79 -msgid "" -"Preventing single points of failure can depend on whether or not a service " -"is stateless." 
-msgstr "" - -#: ../intro-ha-concepts.rst:83 -msgid "" -"A service that provides a response after your request and then requires no " -"further attention. To make a stateless service highly available, you need to " -"provide redundant instances and load balance them. OpenStack services that " -"are stateless include ``nova-api``, ``nova-conductor``, ``glance-api``, " -"``keystone-api``, ``neutron-api`` and ``nova-scheduler``." -msgstr "" - -#: ../intro-ha-concepts.rst:89 -msgid "Stateless service" -msgstr "" - -#: ../intro-ha-concepts.rst:92 -msgid "" -"A service where subsequent requests to the service depend on the results of " -"the first request. Stateful services are more difficult to manage because a " -"single action typically involves more than one request, so simply providing " -"additional instances and load balancing does not solve the problem. For " -"example, if the horizon user interface reset itself every time you went to a " -"new page, it would not be very useful. OpenStack services that are stateful " -"include the OpenStack database and message queue. Making stateful services " -"highly available can depend on whether you choose an active/passive or " -"active/active configuration." -msgstr "" - -#: ../intro-ha-concepts.rst:102 -msgid "Stateful service" -msgstr "" - -#: ../intro-ha-concepts.rst:105 -msgid "Active/Passive vs Active/Active" -msgstr "" - -#: ../intro-ha-concepts.rst:107 -msgid "Stateful services may be configured as active/passive or active/active:" -msgstr "" - -#: ../intro-ha-concepts.rst:110 -msgid "" -"Maintains a redundant instance that can be brought online when the active " -"service fails. For example, OpenStack writes to the main database while " -"maintaining a disaster recovery database that can be brought online if the " -"main database fails." -msgstr "" - -#: ../intro-ha-concepts.rst:116 -msgid "" -"A typical active/passive installation for a stateful service maintains a " -"replacement resource that can be brought online when required. Requests are " -"handled using a :term:`virtual IP` address (VIP) that facilitates returning " -"to service with minimal reconfiguration. A separate application (such as " -"Pacemaker or Corosync) monitors these services, bringing the backup online " -"as necessary." -msgstr "" - -#: ../intro-ha-concepts.rst:121 -msgid ":term:`active/passive configuration`" -msgstr "" - -#: ../intro-ha-concepts.rst:124 -msgid "" -"Each service also has a backup but manages both the main and redundant " -"systems concurrently. This way, if there is a failure, the user is unlikely " -"to notice. The backup system is already online and takes on increased load " -"while the main system is fixed and brought back online." -msgstr "" - -#: ../intro-ha-concepts.rst:130 -msgid "" -"Typically, an active/active installation for a stateless service maintains a " -"redundant instance, and requests are load balanced using a virtual IP " -"address and a load balancer such as HAProxy." -msgstr "" - -#: ../intro-ha-concepts.rst:134 -msgid "" -"A typical active/active installation for a stateful service includes " -"redundant services, with all instances having an identical state. In other " -"words, updates to one instance of a database update all other instances. " -"This way a request to one instance is the same as a request to any other. A " -"load balancer manages the traffic to these systems, ensuring that " -"operational systems always handle the request." 
-msgstr "" - -#: ../intro-ha-concepts.rst:140 -msgid ":term:`active/active configuration`" -msgstr "" - -#: ../intro-ha-concepts.rst:143 -msgid "Clusters and quorums" -msgstr "" - -#: ../intro-ha-concepts.rst:145 -msgid "" -"The quorum specifies the minimal number of nodes that must be functional in " -"a cluster of redundant nodes in order for the cluster to remain functional. " -"When one node fails and failover transfers control to other nodes, the " -"system must ensure that data and processes remain sane. To determine this, " -"the contents of the remaining nodes are compared and, if there are " -"discrepancies, a \"majority rules\" algorithm is implemented." -msgstr "" - -#: ../intro-ha-concepts.rst:153 -msgid "" -"For this reason, each cluster in a high availability environment should have " -"an odd number of nodes and the quorum is defined as more than a half of the " -"nodes. If multiple nodes fail so that the cluster size falls below the " -"quorum value, the cluster itself fails." -msgstr "" - -#: ../intro-ha-concepts.rst:159 -msgid "" -"For example, in a seven-node cluster, the quorum should be set to floor(7/2) " -"+ 1 == 4. If quorum is four and four nodes fail simultaneously, the cluster " -"itself would fail, whereas it would continue to function, if no more than " -"three nodes fail. If split to partitions of three and four nodes " -"respectively, the quorum of four nodes would continue to operate the " -"majority partition and stop or fence the minority one (depending on the no-" -"quorum-policy cluster configuration)." -msgstr "" - -#: ../intro-ha-concepts.rst:167 -msgid "" -"And the quorum could also have been set to three, just as a configuration " -"example." -msgstr "" - -#: ../intro-ha-concepts.rst:172 -msgid "" -"Note that setting the quorum to a value less than floor(n/2) + 1 is not " -"recommended and would likely cause a split-brain in a face of network " -"partitions." -msgstr "" - -#: ../intro-ha-concepts.rst:176 -msgid "" -"Then, for the given example when four nodes fail simultaneously, the cluster " -"would continue to function as well. But if split to partitions of three and " -"four nodes respectively, the quorum of three would have made both sides to " -"attempt to fence the other and host resources. And without fencing enabled, " -"it would go straight to running two copies of each resource." -msgstr "" - -#: ../intro-ha-concepts.rst:182 -msgid "" -"This is why setting the quorum to a value less than floor(n/2) + 1 is " -"dangerous. However it may be required for some specific cases, like a " -"temporary measure at a point it is known with 100% certainty that the other " -"nodes are down." -msgstr "" - -#: ../intro-ha-concepts.rst:187 -msgid "" -"When configuring an OpenStack environment for study or demonstration " -"purposes, it is possible to turn off the quorum checking; this is discussed " -"later in this guide. Production systems should always run with quorum " -"enabled." -msgstr "" - -#: ../intro-ha-concepts.rst:194 -msgid "Single-controller high availability mode" -msgstr "" - -#: ../intro-ha-concepts.rst:196 -msgid "" -"OpenStack supports a single-controller high availability mode that is " -"managed by the services that manage highly available environments but is not " -"actually highly available because no redundant controllers are configured to " -"use for failover. This environment can be used for study and demonstration " -"but is not appropriate for a production environment." 
-msgstr "" - -#: ../intro-ha-concepts.rst:203 -msgid "" -"It is possible to add controllers to such an environment to convert it into " -"a truly highly available environment." -msgstr "" - -#: ../intro-ha-concepts.rst:207 -msgid "" -"High availability is not for every user. It presents some challenges. High " -"availability may be too complex for databases or systems with large amounts " -"of data. Replication can slow large systems down. Different setups have " -"different prerequisites. Read the guidelines for each setup." -msgstr "" - -#: ../intro-ha-concepts.rst:213 -msgid "High availability is turned off as the default in OpenStack setups." -msgstr "" - -#: ../intro-ha-controller.rst:3 -msgid "Overview of highly-available controllers" -msgstr "" - -#: ../intro-ha-controller.rst:5 -msgid "" -"OpenStack is a set of multiple services exposed to the end users as HTTP(s) " -"APIs. Additionally, for own internal usage OpenStack requires SQL database " -"server and AMQP broker. The physical servers, where all the components are " -"running are often called controllers. This modular OpenStack architecture " -"allows to duplicate all the components and run them on different " -"controllers. By making all the components redundant it is possible to make " -"OpenStack highly-available." -msgstr "" - -#: ../intro-ha-controller.rst:14 -msgid "" -"In general we can divide all the OpenStack components into three categories:" -msgstr "" - -#: ../intro-ha-controller.rst:16 -msgid "" -"OpenStack APIs, these are HTTP(s) stateless services written in python, easy " -"to duplicate and mostly easy to load balance." -msgstr "" - -#: ../intro-ha-controller.rst:19 -msgid "" -"SQL relational database server provides stateful type consumed by other " -"components. Supported databases are MySQL, MariaDB, and PostgreSQL. Making " -"SQL database redundant is complex." -msgstr "" - -#: ../intro-ha-controller.rst:23 -msgid "" -":term:`Advanced Message Queuing Protocol (AMQP)` provides OpenStack internal " -"stateful communication service." -msgstr "" - -#: ../intro-ha-controller.rst:27 -msgid "Network components" -msgstr "" - -#: ../intro-ha-controller.rst:29 -msgid "" -"[TODO Need discussion of network hardware, bonding interfaces, intelligent " -"Layer 2 switches, routers and Layer 3 switches.]" -msgstr "" - -#: ../intro-ha-controller.rst:32 -msgid "" -"The configuration uses static routing without Virtual Router Redundancy " -"Protocol (VRRP) or similar techniques implemented." -msgstr "" - -#: ../intro-ha-controller.rst:36 -msgid "" -"[TODO Need description of VIP failover inside Linux namespaces and expected " -"SLA.]" -msgstr "" - -#: ../intro-ha-controller.rst:39 -msgid "" -"See [TODO link] for more information about configuring networking for high " -"availability." -msgstr "" - -#: ../intro-ha-controller.rst:43 -msgid "Common deployement architectures" -msgstr "" - -#: ../intro-ha-controller.rst:45 -msgid "There are primarily two HA architectures in use today." -msgstr "" - -#: ../intro-ha-controller.rst:47 -msgid "" -"One uses a cluster manager such as Pacemaker or Veritas to co-ordinate the " -"actions of the various services across a set of machines. Since we are " -"focused on FOSS, we will refer to this as the Pacemaker architecture." -msgstr "" - -#: ../intro-ha-controller.rst:52 -msgid "" -"The other is optimized for Active/Active services that do not require any " -"inter-machine coordination. 
In this setup, services are started by your init " -"system (systemd in most modern distributions) and a tool is used to move IP " -"addresses between the hosts. The most common package for doing this is " -"keepalived." -msgstr "" - -#: ../intro-ha-other.rst:4 -msgid "High availability for other components" -msgstr "" - -#: ../intro-ha-storage.rst:3 -msgid "Overview of high availability storage" -msgstr "" - -#: ../intro-ha-storage.rst:5 -msgid "" -"Making the Block Storage (cinder) API service highly available in active/" -"passive mode involves:" -msgstr "" - -#: ../intro-ha-storage.rst:8 -msgid "Configuring Block Storage to listen on the VIP address" -msgstr "" - -#: ../intro-ha-storage.rst:10 -msgid "" -"Managing the Block Storage API daemon with the Pacemaker cluster manager" -msgstr "" - -#: ../intro-ha-storage.rst:12 -msgid "Configuring OpenStack services to use this IP address" -msgstr "" - -#: ../intro-ha.rst:4 -msgid "Introduction to OpenStack high availability" -msgstr "" - -#: ../networking-ha-dhcp.rst:6 -msgid "Run neutron DHCP agent" -msgstr "" - -#: ../networking-ha-dhcp.rst:8 -msgid "" -"The OpenStack Networking service has a scheduler that lets you run multiple " -"agents across nodes; the DHCP agent can be natively highly available. To " -"configure the number of DHCP agents per network, modify the " -"``dhcp_agents_per_network`` parameter in the :file:`/etc/neutron/neutron." -"conf` file. By default, this is set to 1. To achieve high availability, " -"assign more than one DHCP agent per network." -msgstr "" - -#: ../networking-ha-l3.rst:0 -msgid "/etc/neutron/neutron.conf parameters for high availability" -msgstr "" - -#: ../networking-ha-l3.rst:6 -msgid "Run neutron L3 agent" -msgstr "" - -#: ../networking-ha-l3.rst:8 -msgid "" -"The neutron L3 agent is scalable because its scheduler supports Virtual " -"Router Redundancy Protocol (VRRP) to distribute virtual routers across " -"multiple nodes. To enable high availability for configured routers, edit " -"the :file:`/etc/neutron/neutron.conf` file to set the following values:" -msgstr "" - -#: ../networking-ha-l3.rst:19 -msgid "Parameter" -msgstr "" - -#: ../networking-ha-l3.rst:20 -msgid "Value" -msgstr "" - -#: ../networking-ha-l3.rst:21 -msgid "Description" -msgstr "" - -#: ../networking-ha-l3.rst:22 -msgid "l3_ha" -msgstr "" - -#: ../networking-ha-l3.rst:23 ../networking-ha-l3.rst:26 -msgid "True" -msgstr "" - -#: ../networking-ha-l3.rst:24 -msgid "All routers are highly available by default." -msgstr "" - -#: ../networking-ha-l3.rst:25 -msgid "allow_automatic_l3agent_failover" -msgstr "" - -#: ../networking-ha-l3.rst:27 -msgid "Set automatic L3 agent failover for routers" -msgstr "" - -#: ../networking-ha-l3.rst:28 -msgid "max_l3_agents_per_router" -msgstr "" - -#: ../networking-ha-l3.rst:29 ../networking-ha-l3.rst:32 -msgid "2 or more" -msgstr "" - -#: ../networking-ha-l3.rst:30 -msgid "Maximum number of network nodes to use for the HA router." -msgstr "" - -#: ../networking-ha-l3.rst:31 -msgid "min_l3_agents_per_router" -msgstr "" - -#: ../networking-ha-l3.rst:33 -msgid "" -"Minimum number of network nodes to use for the HA router. A new router can " -"be created only if this number of network nodes is available." -msgstr "" - -#: ../networking-ha-lbaas.rst:6 -msgid "Run neutron LBaaS agent" -msgstr "" - -#: ../networking-ha-lbaas.rst:8 -msgid "" -"Currently, no native feature is provided to make the LBaaS agent highly " -"available using the default plug-in HAProxy. 
A common way to make HAProxy " -"highly available is to use VRRP (Virtual Router Redundancy Protocol). " -"Unfortunately, this is not yet implemented in the LBaaS HAProxy plug-in." -msgstr "" - -#: ../networking-ha-lbaas.rst:16 -msgid "[TODO: update this section.]" -msgstr "" - -#: ../networking-ha-metadata.rst:6 -msgid "Run neutron metadata agent" -msgstr "" - -#: ../networking-ha-metadata.rst:8 -msgid "" -"No native feature is available to make this service highly available. At " -"this time, an active/passive solution exists to run the neutron metadata " -"agent in failover mode with Pacemaker." -msgstr "" - -#: ../networking-ha-metadata.rst:14 -msgid "" -"[TODO: Update this information. Can this service now be made HA in active/" -"active mode or do we need to pull in the instructions to run this service in " -"active/passive mode?]" -msgstr "" - -#: ../networking-ha.rst:4 -msgid "OpenStack network nodes" -msgstr "" - -#: ../networking-ha.rst:6 -msgid "" -"Configure networking on each node. The `Networking `_ section of " -"the *Install Guide* includes basic information about configuring networking." -msgstr "" - -#: ../networking-ha.rst:12 -msgid "Notes from planning outline:" -msgstr "" - -#: ../networking-ha.rst:14 -msgid "" -"Rather than configuring neutron here, we should simply mention physical " -"network HA methods such as bonding and additional node/network requirements " -"for L3HA and DVR for planning purposes." -msgstr "" - -#: ../networking-ha.rst:18 -msgid "" -"Neutron agents should be described for active/active; deprecate the single " -"agent instance case." -msgstr "" - -#: ../networking-ha.rst:20 -msgid "For Kilo and beyond, focus on L3HA and DVR." -msgstr "" - -#: ../networking-ha.rst:21 -msgid "" -"Link to `Networking Guide `_ " -"for configuration details." -msgstr "" - -#: ../networking-ha.rst:24 -msgid "" -"[TODO: Verify that the active/passive network configuration information from " -"``_ should not be included here." -msgstr "" - -#: ../networking-ha.rst:29 -msgid "" -"`LP1328922 ` and " -"`LP1349398 ` are " -"related.]" -msgstr "" - -#: ../networking-ha.rst:34 -msgid "OpenStack network nodes contain:" -msgstr "" - -#: ../networking-ha.rst:36 -msgid ":ref:`Neutron DHCP agent`" -msgstr "" - -#: ../networking-ha.rst:37 -msgid "" -"Neutron L2 agent. Note that the L2 agent cannot be distributed and highly " -"available. Instead, it must be installed on each data forwarding node to " -"control the virtual network drivers such as Open vSwitch or Linux Bridge. " -"One L2 agent runs per node and controls its virtual interfaces." -msgstr "" - -#: ../networking-ha.rst:43 -msgid ":ref:`Neutron L3 agent`" -msgstr "" - -#: ../networking-ha.rst:44 -msgid ":ref:`Neutron metadata agent`" -msgstr "" - -#: ../networking-ha.rst:45 -msgid ":ref:`Neutron LBaaS` (Load Balancing as a Service) agent" -msgstr "" - -#: ../networking-ha.rst:49 -msgid "" -"For Liberty, we generally do not have standalone network nodes. We " -"usually run the Networking services on the controller nodes. In this guide, " -"we use the term \"network nodes\" for convenience." -msgstr "" - -#: ../noncore-ha.rst:4 -msgid "Configuring non-core components for high availability" -msgstr "" - -#: ../storage-ha-backend.rst:6 -msgid "Storage back end" -msgstr "" - -#: ../storage-ha-backend.rst:8 -msgid "" -"Most of this guide concerns the control plane of high availability: ensuring " -"that services continue to run even if a component fails. 
Ensuring that data " -"is not lost is the data plane component of high availability; this is " -"discussed here." -msgstr "" - -#: ../storage-ha-backend.rst:14 -msgid "An OpenStack environment includes multiple data pools for the VMs:" -msgstr "" - -#: ../storage-ha-backend.rst:16 -msgid "" -"Ephemeral storage is allocated for an instance and is deleted when the " -"instance is deleted. The Compute service manages ephemeral storage. By " -"default, Compute stores ephemeral drives as files on local disks on the " -"Compute node, but Ceph RBD can instead be used as the storage back end for " -"ephemeral storage." -msgstr "" - -#: ../storage-ha-backend.rst:24 -msgid "" -"Persistent storage exists outside all instances. Two types of persistent " -"storage are provided:" -msgstr "" - -#: ../storage-ha-backend.rst:27 -msgid "" -"Block Storage service (cinder) can use LVM or Ceph RBD as the storage back " -"end." -msgstr "" - -#: ../storage-ha-backend.rst:29 -msgid "" -"Image service (glance) can use the Object Storage service (swift) or Ceph " -"RBD as the storage back end." -msgstr "" - -#: ../storage-ha-backend.rst:33 -msgid "" -"For more information about configuring storage back ends for the different " -"storage options, see the `Cloud Administrator Guide `_." -msgstr "" - -#: ../storage-ha-backend.rst:37 -msgid "" -"This section discusses ways to protect against data loss in your OpenStack " -"environment." -msgstr "" - -#: ../storage-ha-backend.rst:41 -msgid "RAID drives" -msgstr "" - -#: ../storage-ha-backend.rst:43 -msgid "" -"Configuring RAID on the hard drives that implement storage protects your " -"data against a hard drive failure. If, however, the node itself fails, data " -"may be lost. In particular, all volumes stored on an LVM node can be lost." -msgstr "" - -#: ../storage-ha-backend.rst:49 -msgid "Ceph" -msgstr "" - -#: ../storage-ha-backend.rst:51 -msgid "" -"`Ceph RBD `_ is an innately highly available storage back " -"end. It creates a storage cluster with multiple nodes that communicate with " -"each other to replicate and redistribute data dynamically. A Ceph RBD " -"storage cluster provides a single shared set of storage nodes that can " -"handle all classes of persistent and ephemeral data -- glance, cinder, and " -"nova -- that are required for OpenStack instances." -msgstr "" - -#: ../storage-ha-backend.rst:62 -msgid "" -"Ceph RBD provides object replication capabilities by storing Block Storage " -"volumes as Ceph RBD objects; Ceph RBD ensures that each replica of an object " -"is stored on a different node. This means that your volumes are protected " -"against hard drive and node failures or even the failure of the data center " -"itself." -msgstr "" - -#: ../storage-ha-backend.rst:70 -msgid "" -"When Ceph RBD is used for ephemeral volumes as well as block and image " -"storage, it supports `live migration `_ of VMs with ephemeral drives; LVM " -"only supports live migration of volume-backed VMs." -msgstr "" - -#: ../storage-ha-backend.rst:78 -msgid "Remote backup facilities" -msgstr "" - -#: ../storage-ha-backend.rst:80 -msgid "" -"[TODO: Add discussion of remote backup facilities as an alternate way to " -"secure one's data. 
Include brief mention of key third-party technologies with " -"links to their documentation]" -msgstr "" - -#: ../storage-ha-cinder.rst:6 -msgid "Highly available Block Storage API" -msgstr "" - -#: ../storage-ha-cinder.rst:8 -msgid "" -"Cinder provides 'block storage as a service' suitable for performance-" -"sensitive scenarios such as databases, expandable file systems, or providing " -"a server with access to raw block level storage." -msgstr "" - -#: ../storage-ha-cinder.rst:12 -msgid "" -"Persistent block storage can survive instance termination and can also be " -"moved across instances like any external storage device. Cinder also has " -"a volume snapshot capability for backing up volumes." -msgstr "" - -#: ../storage-ha-cinder.rst:16 -msgid "" -"Making this Block Storage API service highly available in active/passive " -"mode involves:" -msgstr "" - -#: ../storage-ha-cinder.rst:19 -msgid ":ref:`ha-cinder-pacemaker`" -msgstr "" - -#: ../storage-ha-cinder.rst:20 -msgid ":ref:`ha-cinder-configure`" -msgstr "" - -#: ../storage-ha-cinder.rst:21 -msgid ":ref:`ha-cinder-services`" -msgstr "" - -#: ../storage-ha-cinder.rst:23 -msgid "" -"In theory, you can run the Block Storage service as active/active. However, " -"because of significant concerns, it is recommended to run the volume " -"component as active/passive only." -msgstr "" - -#: ../storage-ha-cinder.rst:27 -msgid "Jon Bernard writes:" -msgstr "" - -#: ../storage-ha-cinder.rst:63 -msgid "" -"You can read more about these concerns on the `Red Hat Bugzilla `_ and there is a `pseudo " -"roadmap `_ " -"for addressing them upstream." -msgstr "" - -#: ../storage-ha-cinder.rst:73 -msgid "Add Block Storage API resource to Pacemaker" -msgstr "" - -#: ../storage-ha-cinder.rst:75 -msgid "" -"On RHEL-based systems, you should create resources for cinder's systemd " -"agents and create constraints to enforce startup/shutdown ordering:" -msgstr "" - -#: ../storage-ha-cinder.rst:91 -msgid "" -"If the Block Storage service runs on the same nodes as the other services, " -"then it is advisable to also include:" -msgstr "" - -#: ../storage-ha-cinder.rst:98 -msgid "" -"Alternatively, instead of using systemd agents, download and install the OCF " -"resource agent:" -msgstr "" - -#: ../storage-ha-cinder.rst:107 -msgid "" -"You can now add the Pacemaker configuration for the Block Storage API resource. " -"Connect to the Pacemaker cluster with the :command:`crm configure` command " -"and add the following cluster resources:" -msgstr "" - -#: ../storage-ha-cinder.rst:121 -msgid "" -"This configuration creates ``p_cinder-api``, a resource for managing the " -"Block Storage API service." -msgstr "" - -#: ../storage-ha-cinder.rst:124 -msgid "" -"The command :command:`crm configure` supports batch input, so you may copy " -"and paste the lines above into your live Pacemaker configuration and then " -"make changes as required. For example, you may enter ``edit p_ip_cinder-" -"api`` from the :command:`crm configure` menu and edit the resource to match " -"your preferred virtual IP address." -msgstr "" - -#: ../storage-ha-cinder.rst:131 -msgid "" -"Once completed, commit your configuration changes by entering :command:" -"`commit` from the :command:`crm configure` menu. Pacemaker then starts the " -"Block Storage API service and its dependent resources on one of your nodes."
-msgstr "" - -#: ../storage-ha-cinder.rst:139 -msgid "Configure Block Storage API service" -msgstr "" - -#: ../storage-ha-cinder.rst:141 -msgid "Edit the ``/etc/cinder/cinder.conf`` file:" -msgstr "" - -#: ../storage-ha-cinder.rst:143 -msgid "On a RHEL-based system, it should look something like:" -msgstr "" - -#: ../storage-ha-cinder.rst:184 -msgid "" -"Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage " -"database. Replace ``CINDER_PASS`` with the password you chose for the " -"``cinder`` user in the Identity service." -msgstr "" - -#: ../storage-ha-cinder.rst:188 -msgid "" -"This example assumes that you are using NFS for the physical storage, which " -"will almost never be true in a production installation." -msgstr "" - -#: ../storage-ha-cinder.rst:191 -msgid "" -"If you are using the Block Storage service OCF agent, some settings will be " -"filled in for you, resulting in a shorter configuration file:" -msgstr "" - -#: ../storage-ha-cinder.rst:212 -msgid "" -"Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage " -"database." -msgstr "" - -#: ../storage-ha-cinder.rst:218 -msgid "Configure OpenStack services to use highly available Block Storage API" -msgstr "" - -#: ../storage-ha-cinder.rst:220 -msgid "" -"Your OpenStack services must now point their Block Storage API configuration " -"to the highly available, virtual cluster IP address rather than a Block " -"Storage API server’s physical IP address as you would for a non-HA " -"environment." -msgstr "" - -#: ../storage-ha-cinder.rst:226 -msgid "You must create the Block Storage API endpoint with this IP." -msgstr "" - -#: ../storage-ha-cinder.rst:228 -msgid "" -"If you are using both private and public IP addresses, you should create two " -"virtual IPs and define your endpoint like this:" -msgstr "" - -#: ../storage-ha-glance.rst:3 -msgid "Highly available OpenStack Image API" -msgstr "" - -#: ../storage-ha-glance.rst:5 -msgid "" -"The OpenStack Image service offers a service for discovering, registering, " -"and retrieving virtual machine images. To make the OpenStack Image API " -"service highly available in active / passive mode, you must:" -msgstr "" - -#: ../storage-ha-glance.rst:10 -msgid ":ref:`glance-api-pacemaker`" -msgstr "" - -#: ../storage-ha-glance.rst:11 -msgid ":ref:`glance-api-configure`" -msgstr "" - -#: ../storage-ha-glance.rst:12 -msgid ":ref:`glance-services`" -msgstr "" - -#: ../storage-ha-glance.rst:14 -msgid "" -"This section assumes that you are familiar with the `documentation `_ for " -"installing the OpenStack Image API service." -msgstr "" - -#: ../storage-ha-glance.rst:22 -msgid "Add OpenStack Image API resource to Pacemaker" -msgstr "" - -# #-#-#-#-# storage-ha-glance.pot (High Availability Guide 0.0.1) #-#-#-#-# -# #-#-#-#-# storage-ha-manila.pot (High Availability Guide 0.0.1) #-#-#-#-# -#: ../storage-ha-glance.rst:24 ../storage-ha-manila.rst:20 -msgid "You must first download the resource agent to your system:" -msgstr "" - -#: ../storage-ha-glance.rst:32 -msgid "" -"You can now add the Pacemaker configuration for the OpenStack Image API " -"resource. Use the :command:`crm configure` command to connect to the " -"Pacemaker cluster and add the following cluster resources:" -msgstr "" - -#: ../storage-ha-glance.rst:47 -msgid "" -"This configuration creates ``p_glance-api``, a resource for managing the " -"OpenStack Image API service." 
-msgstr "" - -#: ../storage-ha-glance.rst:50 -msgid "" -"The :command:`crm configure` command supports batch input, so you may copy " -"and paste the above into your live Pacemaker configuration and then make " -"changes as required. For example, you may enter edit ``p_ip_glance-api`` " -"from the :command:`crm configure` menu and edit the resource to match your " -"preferred virtual IP address." -msgstr "" - -#: ../storage-ha-glance.rst:57 -msgid "" -"After completing these steps, commit your configuration changes by entering :" -"command:`commit` from the :command:`crm configure` menu. Pacemaker then " -"starts the OpenStack Image API service and its dependent resources on one of " -"your nodes." -msgstr "" - -#: ../storage-ha-glance.rst:66 -msgid "Configure OpenStack Image service API" -msgstr "" - -#: ../storage-ha-glance.rst:68 -msgid "" -"Edit the :file:`/etc/glance/glance-api.conf` file to configure the OpenStack " -"image service:" -msgstr "" - -#: ../storage-ha-glance.rst:91 -msgid "[TODO: need more discussion of these parameters]" -msgstr "" - -#: ../storage-ha-glance.rst:96 -msgid "" -"Configure OpenStack services to use highly available OpenStack Image API" -msgstr "" - -#: ../storage-ha-glance.rst:98 -msgid "" -"Your OpenStack services must now point their OpenStack Image API " -"configuration to the highly available, virtual cluster IP address instead of " -"pointint to the physical IP address of an OpenStack Image API server as you " -"would in a non-HA cluster." -msgstr "" - -#: ../storage-ha-glance.rst:105 -msgid "" -"For OpenStack Compute, for example, if your OpenStack Image API service IP " -"address is 10.0.0.11 (as in the configuration explained here), you would use " -"the following configuration in your :file:`nova.conf` file:" -msgstr "" - -#: ../storage-ha-glance.rst:118 -msgid "" -"You must also create the OpenStack Image API endpoint with this IP address. " -"If you are using both private and public IP addresses, you should create two " -"virtual IP addresses and define your endpoint like this:" -msgstr "" - -#: ../storage-ha-manila.rst:6 -msgid "Highly available Shared File Systems API" -msgstr "" - -#: ../storage-ha-manila.rst:8 -msgid "" -"Making the Shared File Systems (manila) API service highly available in " -"active/passive mode involves:" -msgstr "" - -#: ../storage-ha-manila.rst:11 -msgid ":ref:`ha-manila-pacemaker`" -msgstr "" - -#: ../storage-ha-manila.rst:12 -msgid ":ref:`ha-manila-configure`" -msgstr "" - -#: ../storage-ha-manila.rst:13 -msgid ":ref:`ha-manila-services`" -msgstr "" - -#: ../storage-ha-manila.rst:18 -msgid "Add Shared File Systems API resource to Pacemaker" -msgstr "" - -#: ../storage-ha-manila.rst:28 -msgid "" -"You can now add the Pacemaker configuration for the Shared File Systems API " -"resource. Connect to the Pacemaker cluster with the :command:`crm configure` " -"command and add the following cluster resources:" -msgstr "" - -#: ../storage-ha-manila.rst:42 -msgid "" -"This configuration creates ``p_manila-api``, a resource for managing the " -"Shared File Systems API service." -msgstr "" - -#: ../storage-ha-manila.rst:45 -msgid "" -"The :command:`crm configure` supports batch input, so you may copy and paste " -"the lines above into your live Pacemaker configuration and then make changes " -"as required. For example, you may enter ``edit p_ip_manila-api`` from the :" -"command:`crm configure` menu and edit the resource to match your preferred " -"virtual IP address." 
-msgstr "" - -#: ../storage-ha-manila.rst:51 -msgid "" -"Once completed, commit your configuration changes by entering :command:" -"`commit` from the :command:`crm configure` menu. Pacemaker then starts the " -"Shared File Systems API service and its dependent resources on one of your " -"nodes." -msgstr "" - -#: ../storage-ha-manila.rst:59 -msgid "Configure Shared File Systems API service" -msgstr "" - -#: ../storage-ha-manila.rst:61 -msgid "Edit the :file:`/etc/manila/manila.conf` file:" -msgstr "" - -#: ../storage-ha-manila.rst:80 -msgid "Configure OpenStack services to use HA Shared File Systems API" -msgstr "" - -#: ../storage-ha-manila.rst:82 -msgid "" -"Your OpenStack services must now point their Shared File Systems API " -"configuration to the highly available, virtual cluster IP address rather " -"than a Shared File Systems API server’s physical IP address as you would for " -"a non-HA environment." -msgstr "" - -#: ../storage-ha-manila.rst:87 -msgid "You must create the Shared File Systems API endpoint with this IP." -msgstr "" - -#: ../storage-ha-manila.rst:89 -msgid "" -"If you are using both private and public IP addresses, you should create two " -"virtual IPs and define your endpoints like this:" -msgstr "" - -#: ../storage-ha.rst:3 -msgid "Configuring Storage for high availability" -msgstr "" diff --git a/doc/ha-guide/source/locale/ja/LC_MESSAGES/ha-guide.po b/doc/ha-guide/source/locale/ja/LC_MESSAGES/ha-guide.po deleted file mode 100644 index 31d2ea01..00000000 --- a/doc/ha-guide/source/locale/ja/LC_MESSAGES/ha-guide.po +++ /dev/null @@ -1,4398 +0,0 @@ -# Akihiro Motoki , 2015. #zanata -# KATO Tomoyuki , 2015. #zanata -# OpenStack Infra , 2015. #zanata -# Yuko Katabami , 2015. #zanata -# KATO Tomoyuki , 2016. #zanata -# Kyohei Moriyama , 2016. #zanata -# Shinichi Take , 2016. #zanata -# Yuta Hono , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: High Availability Guide 0.0.1\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2016-03-05 00:17+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-31 12:49+0000\n" -"Last-Translator: Kyohei Moriyama \n" -"Language: ja\n" -"Plural-Forms: nplurals=1; plural=0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Japanese\n" - -msgid "**Cluster Address** List the IP addresses for each cluster node." -msgstr "**クラスターアドレス** 各クラスターノードの IP アドレスを表示します。" - -msgid "**Cluster Name** Define an arbitrary name for your cluster." -msgstr "**クラスター名** 任意のクラスターの名前を定義します。" - -msgid "**Corosync configuration file fragment for unicast (corosync.conf)**" -msgstr "**ユニキャスト向け Corosync 設定ファイルの断片 (corosync.conf)**" - -msgid "**Data loss**" -msgstr "**データロス**" - -msgid "**Example Corosync configuration file for multicast (corosync.conf)**" -msgstr "**マルチキャスト用の Corosync 設定ファイル例 (corosync.conf)**" - -msgid "**Node Address** Define the IP address of the cluster node." -msgstr "**ノードアドレス** クラスターノードの IP アドレスを定義します。" - -msgid "**Node Name** Define the logical name of the cluster node." -msgstr "**ノード名** クラスターノードの論理名を定義します。" - -msgid "**System downtime**" -msgstr "**システムの停止時間**" - -msgid "" -"**wsrep Provider** The Galera Replication Plugin serves as the wsrep " -"Provider for Galera Cluster. It is installed on your system as the " -"``libgalera_smm.so`` file. You must define the path to this file in your " -"``my.cnf``." 
-msgstr "" -"**wsrep Provider** Galera Replication Plugin は、Galera Cluster の wsrep " -"Provider として動作します。お使いのシステムに ``libgalera_smm.so`` ファイルと" -"してインストールされます。このファイルへのパスを ``my.cnf`` に定義する必要が" -"あります。" - -msgid "/etc/neutron/neutron.conf parameters for high availability" -msgstr "高可用性のための /etc/neutron/neutron.conf のパラメーター" - -msgid "1-2" -msgstr "1-2" - -msgid "100 GB" -msgstr "100 GB" - -msgid "100+ GB" -msgstr "100+ GB" - -msgid "2" -msgstr "2" - -msgid "2 or more" -msgstr "2 以上" - -msgid "2-4+" -msgstr "2-4+" - -msgid "8 GB" -msgstr "8 GB" - -msgid "8+ GB" -msgstr "8+ GB" - -msgid ":command:`# /etc/init.d/corosync start` (LSB)" -msgstr ":command:`# /etc/init.d/corosync start` (LSB)" - -msgid ":command:`# /etc/init.d/pacemaker start` (LSB)" -msgstr ":command:`# /etc/init.d/pacemaker start` (LSB)" - -msgid ":command:`# apt-get install rabbitmq-server`" -msgstr ":command:`# apt-get install rabbitmq-server`" - -msgid ":command:`# service corosync start` (LSB, alternate)" -msgstr ":command:`# service corosync start` (LSB, 別の方法)" - -msgid ":command:`# service pacemaker start` (LSB, alternate)" -msgstr ":command:`# service pacemaker start` (LSB, 別の方法)" - -msgid ":command:`# start corosync` (upstart)" -msgstr ":command:`# start corosync` (upstart)" - -msgid ":command:`# start pacemaker` (upstart)" -msgstr ":command:`# start pacemaker` (upstart)" - -msgid ":command:`# systemctl start corosync` (systemd)" -msgstr ":command:`# systemctl start corosync` (systemd)" - -msgid ":command:`# systemctl start pacemaker` (systemd)" -msgstr ":command:`# systemctl start pacemaker` (systemd)" - -msgid ":command:`# yum install rabbitmq-server`" -msgstr ":command:`# yum install rabbitmq-server`" - -msgid ":command:`# zypper addrepo -f obs://Cloud:OpenStack:Kilo/SLE_12 Kilo`" -msgstr ":command:`# zypper addrepo -f obs://Cloud:OpenStack:Kilo/SLE_12 Kilo`" - -msgid ":command:`# zypper install rabbitmq-server`" -msgstr ":command:`# zypper install rabbitmq-server`" - -msgid "" -":command:`crm configure` supports batch input so you may copy and paste the " -"above lines into your live Pacemaker configuration, and then make changes as " -"required. For example, you may enter edit ``p_ip_keystone`` from the :" -"command:`crm configure` menu and edit the resource to match your preferred " -"virtual IP address." 
-msgstr "" -":command:`crm configure` はバッチ入力をサポートします。そのため、現在の " -"pacemaker 設定の中に上をコピー・ペーストし、適宜変更を反映できます。例えば、" -"お好みの仮想 IP アドレスに一致させるために、:command:`crm configure` メニュー" -"から ``edit p_ip_keystone`` と入力し、リソースを編集できます。" - -msgid "" -":ref:`Configure OpenStack services to use Rabbit HA queues `" -msgstr "" -":ref:`RabbitMQ HA キューを使用するための OpenStack サービスの設定 `" - -msgid ":ref:`Configure RabbitMQ for HA queues`" -msgstr ":ref:`高可用性 キュー用の RabbitMQ の設定 `" - -msgid ":ref:`Install RabbitMQ`" -msgstr ":ref:`RabbitMQ のインストール`" - -msgid ":ref:`Neutron DHCP agent`" -msgstr ":ref:`Neutron DHCP エージェント `" - -msgid ":ref:`Neutron L3 agent`" -msgstr ":ref:`Neutron L3 エージェント `" - -msgid ":ref:`Neutron LBaaS` (Load Balancing as a Service) agent" -msgstr "" -":ref:`Neutron LBaaS` (Load Balancing as a Service) エージェン" -"ト" - -msgid ":ref:`Neutron metadata agent`" -msgstr ":ref:`Neutron メタデータエージェント `" - -msgid ":ref:`corosync-multicast`" -msgstr ":ref:`corosync-multicast`" - -msgid ":ref:`corosync-unicast`" -msgstr ":ref:`corosync-unicast`" - -msgid ":ref:`corosync-votequorum`" -msgstr ":ref:`corosync-votequorum`" - -msgid ":ref:`glance-api-configure`" -msgstr ":ref:`glance-api-configure`" - -msgid ":ref:`glance-api-pacemaker`" -msgstr ":ref:`glance-api-pacemaker`" - -msgid ":ref:`glance-services`" -msgstr ":ref:`glance-services`" - -msgid ":ref:`ha-cinder-configure`" -msgstr ":ref:`ha-cinder-configure`" - -msgid ":ref:`ha-cinder-pacemaker`" -msgstr ":ref:`ha-cinder-pacemaker`" - -msgid ":ref:`ha-cinder-services`" -msgstr ":ref:`ha-cinder-services`" - -msgid ":ref:`ha-manila-configure`" -msgstr ":ref:`ha-manila-configure`" - -msgid ":ref:`ha-manila-pacemaker`" -msgstr ":ref:`ha-manila-pacemaker`" - -msgid ":ref:`ha-manila-services`" -msgstr ":ref:`ha-manila-services`" - -msgid ":ref:`keystone-config-identity`" -msgstr ":ref:`keystone-config-identity`" - -msgid ":ref:`keystone-pacemaker`" -msgstr ":ref:`keystone-pacemaker`" - -msgid ":ref:`keystone-services-config`" -msgstr ":ref:`keystone-services-config`" - -msgid ":ref:`pacemaker-cluster-properties`" -msgstr ":ref:`pacemaker-cluster-properties`" - -msgid ":ref:`pacemaker-corosync-setup`" -msgstr ":ref:`pacemaker-corosync-setup`" - -msgid ":ref:`pacemaker-corosync-start`" -msgstr ":ref:`pacemaker-corosync-start`" - -msgid ":ref:`pacemaker-install`" -msgstr ":ref:`pacemaker-install`" - -msgid ":ref:`pacemaker-start`" -msgstr ":ref:`pacemaker-start`" - -msgid ":ref:`search`" -msgstr ":ref:`search`" - -msgid "" -":term:`Advanced Message Queuing Protocol (AMQP)` provides OpenStack internal " -"stateful communication service." -msgstr "" -":term:`Advanced Message Queuing Protocol (AMQP)` は、OpenStack 内部のステート" -"フルな通信サービスを提供します。" - -msgid ":term:`active/active configuration`" -msgstr ":term:`アクティブ/アクティブ設定 `" - -msgid ":term:`active/passive configuration`" -msgstr ":term:`アクティブ/パッシブ設定 `" - -msgid "" -"A crucial aspect of high availability is the elimination of single points of " -"failure (SPOFs). A SPOF is an individual piece of equipment or software that " -"causes system downtime or data loss if it fails. 
In order to eliminate "
-"SPOFs, check that mechanisms exist for redundancy of:"
-msgstr ""
-"高可用性の重要な側面は、単一障害点 (SPOF) を減らすことです。SPOF は、障害が発"
-"生した場合にシステム停止やデータ損失を引き起こす、設備やソフトウェアの個々の"
-"部品です。SPOF を削減するために、以下の冗長性に対するメカニズムを確認します。"
-
-msgid ""
-"A sample votequorum service configuration in the :file:`corosync.conf` file "
-"is:"
-msgstr ":file:`corosync.conf` ファイルの votequorum サービス設定例:"
-
-msgid ""
-"A service that provides a response after your request and then requires no "
-"further attention. To make a stateless service highly available, you need to "
-"provide redundant instances and load balance them. OpenStack services that "
-"are stateless include ``nova-api``, ``nova-conductor``, ``glance-api``, "
-"``keystone-api``, ``neutron-api`` and ``nova-scheduler``."
-msgstr ""
-"リクエストに応答して、その後さらなる注意を必要としないサービス。ステートレス"
-"なサービスを高可用化するために、複数のインスタンスを配備して、負荷分散する必"
-"要があります。ステートレスな OpenStack サービスに ``nova-api``、``nova-"
-"conductor``、``glance-api``、``keystone-api``、``neutron-api``、``nova-"
-"scheduler`` があります。"
-
-msgid ""
-"A service where subsequent requests to the service depend on the results of "
-"the first request. Stateful services are more difficult to manage because a "
-"single action typically involves more than one request, so simply providing "
-"additional instances and load balancing does not solve the problem. For "
-"example, if the horizon user interface reset itself every time you went to a "
-"new page, it would not be very useful. OpenStack services that are stateful "
-"include the OpenStack database and message queue. Making stateful services "
-"highly available can depend on whether you choose an active/passive or "
-"active/active configuration."
-msgstr ""
-"最初のリクエストの結果に応じて、後続のリクエストがあるサービス。ステートフル"
-"サービスは、あるアクションが一般的に複数のリクエストに影響するため、管理する"
-"ことが難しいです。そのため、単純に追加インスタンスを配備して負荷分散するだけ"
-"では、問題を解決できません。例えば、horizon ユーザーインターフェースが、新し"
-"いページを開くたびに毎回リセットされると、ほとんど役に立たないでしょう。ス"
-"テートフルな OpenStack サービスには、OpenStack のデータベース、メッセージ"
-"キューがあります。ステートフルなサービスの高可用化には、アクティブ/パッシブま"
-"たはアクティブ/アクティブな設定のどちらを選択するかに依存する可能性がありま"
-"す。"
-
-msgid ""
-"A shared implementation and calculation of `quorum `_."
-msgstr ""
-"`クォーラム `_ の"
-"共有実装と計算"
-
-msgid ""
-"A typical active/active installation for a stateful service includes "
-"redundant services, with all instances having an identical state. In other "
-"words, updates to one instance of a database update all other instances. "
-"This way a request to one instance is the same as a request to any other. A "
-"load balancer manages the traffic to these systems, ensuring that "
-"operational systems always handle the request."
-msgstr ""
-"一般的にステートフルサービスをアクティブ / アクティブにインストールすること"
-"は、すべてのインスタンスが同じ状態を持つ冗長なサービスになることを含みます。"
-"別の言い方をすると、あるインスタンスのデータベースの更新は、他のすべてのイン"
-"スタンスも更新されます。このように、あるインスタンスへのリクエストは、他への"
-"リクエストと同じです。ロードバランサーがこれらのシステムのトラフィックを管理"
-"し、利用可能なシステムが常にリクエストを確実に処理します。"
-
-msgid ""
-"A typical active/passive installation for a stateful service maintains a "
-"replacement resource that can be brought online when required. Requests are "
-"handled using a :term:`virtual IP` address (VIP) that facilitates returning "
-"to service with minimal reconfiguration. A separate application (such as "
-"Pacemaker or Corosync) monitors these services, bringing the backup online "
-"as necessary."
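A representative sketch of the votequorum stanza referred to above, as it might appear in :file:`corosync.conf` (the values are illustrative assumptions, not prescriptive)::

    quorum {
        provider: corosync_votequorum
        expected_votes: 7
        wait_for_all: 1
        last_man_standing: 1
        last_man_standing_window: 10000
    }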
-msgstr "" -"一般的にステートレスサービスをアクティブ / パッシブにインストールすると、必要" -"に応じてオンラインにできる置換リソースを維持します。リクエストは、サービスの" -"最小限の再設定により返す機能を持つ :term:`仮想 IP ` アドレス " -"(VIP) を使用して処理されます。 独立したアプリケーション (Pacemaker や " -"Corosync など) がこれらのサービスを監視し、必要に応じてバックアップ側をオンラ" -"インにします。" - -msgid "API isolation" -msgstr "API 分離" - -msgid "" -"Ability to take periodic \"snap shots\" throughout the installation process " -"and \"roll back\" to a working configuration in the event of a problem." -msgstr "" -"インストールプロセス以降、定期的な「スナップショット」を取得する機能、および" -"問題発生時に動作する設定に「ロールバック」する機能があります。" - -msgid "Abstract" -msgstr "概要" - -msgid "" -"Access to RabbitMQ is not normally handled by HAproxy. Instead, consumers " -"must be supplied with the full list of hosts running RabbitMQ with " -"``rabbit_hosts`` and turn on the ``rabbit_ha_queues`` option." -msgstr "" -"RabbitMQ へのアクセスは、通常 HAproxy により取り扱われません。利用者は代わり" -"に、 ``rabbit_hosts`` を用いて RabbitMQ を実行しているホストの一覧を指定し" -"て、 ``rabbit_ha_queues`` オプションを有効化する必要があります。" - -msgid "" -"Access to memcached is not handled by HAproxy because replicated access is " -"currently only in an experimental state. Instead OpenStack services must be " -"supplied with the full list of hosts running memcached." -msgstr "" -"重複アクセスは現在実験的な位置づけのため、memcached へのアクセスは HAproxy を" -"利用しません。代わりに、OpenStack のサービスは memcached を実行しているホスト" -"をすべて指定する必要があります。" - -msgid "" -"Access via an HAProxy virtual IP, for services such as HTTPd that are " -"accessed via a TCP socket that can be load balanced" -msgstr "" -"HAProxy 仮想 IP 経由のアクセス、負荷分散できる TCP ソケット経由でアクセス可能" -"な HTTPd などのサービス向け。" - -msgid "Accidental deletion or destruction of data." -msgstr "意図しないデータの削除や破損。" - -msgid "Active/Passive vs Active/Active" -msgstr "アクティブ/パッシブとアクティブ/アクティブ" - -msgid "Add Block Storage API resource to Pacemaker" -msgstr "Block Storage API リソースの Pacemaker への追加" - -msgid "Add OpenStack Identity resource to Pacemaker" -msgstr "OpenStack Identity リソースの Pacemaker への追加" - -msgid "Add OpenStack Image API resource to Pacemaker" -msgstr "OpenStack Image API リソースの Pacemaker への追加" - -msgid "Add Shared File Systems API resource to Pacemaker" -msgstr "Shared File Systems API リソースの Pacemaker への追加" - -msgid "Add the Galera Cluster service:" -msgstr "Galera Cluster サービスを追加します。" - -msgid "Add the GnuPG key for the database repository that you want to use." -msgstr "使用したいデータベースのリポジトリーに GnuPG キーを追加します。" - -msgid "" -"Add the repository to your sources list. Using your preferred text editor, " -"create a ``galera.list`` file in the ``/etc/apt/sources.list.d/`` directory. " -"For the contents of this file, use the lines that pertain to the software " -"repository you want to install:" -msgstr "" -"リポジトリーをソースリストに追加します。お好きなテキストエディターを使用し" -"て、``/etc/apt/sources.list.d/`` ディレクトリーに ``galera.list`` を作成しま" -"す。このファイルの内容は、インストールしたいソフトウェアリポジトリーに関する" -"行を使用します。" - -msgid "Add the repository to your system:" -msgstr "リポジトリーをお使いのシステムに追加します。" - -msgid "Additional parameters" -msgstr "追加パラメーター" - -msgid "" -"After completing these steps, commit your configuration changes by entering :" -"command:`commit` from the :command:`crm configure` menu. Pacemaker then " -"starts the OpenStack Image API service and its dependent resources on one of " -"your nodes." -msgstr "" -"これらの手順の完了後、:command:`crm configure` メニューから :command:" -"`commit` と入力し、設定の変更をコミットします。Pacemaker は OpenStack Image " -"API サービスおよび依存するリソースを同じノードに起動します。" - -msgid "" -"After installing the Corosync package, you must create the :file:`/etc/" -"corosync/corosync.conf` configuration file." 
-msgstr "" -"Corosync パッケージのインストール後、 :file:`/etc/corosync/corosync.conf` 設" -"定ファイルを作成する必要があります。" - -msgid "" -"After the Corosync services have been started and you have verified that the " -"cluster is communicating properly, you can start :command:`pacemakerd`, the " -"Pacemaker master control process:" -msgstr "" -"Corosync サービスが起動して、クラスターが正常に通信していることを確認した後、" -"Pacemaker のマスター制御プロセス :command:`pacemakerd` を起動できます。" - -msgid "" -"After the Pacemaker services have started, Pacemaker creates a default empty " -"cluster configuration with no resources. Use the :command:`crm_mon` utility " -"to observe the status of Pacemaker:" -msgstr "" -"Pacemaker サービスの起動後、Pacemaker がリソースを持たないデフォルトの空クラ" -"スターを作成します。 :command:`crm_mon` ユーティリティーを使用して、" -"Pacemaker の状態を確認します。" - -msgid "" -"After you add these resources, commit your configuration changes by " -"entering :command:`commit` from the :command:`crm configure` menu. Pacemaker " -"then starts the OpenStack Identity service and its dependent resources on " -"one of your nodes." -msgstr "" -"これらのリソースの追加後、:command:`crm configure` メニューから :command:" -"`commit` と入力し、設定の変更をコミットします。Pacemaker は OpenStack " -"Identity サービスおよび依存するリソースを同じノードに起動します。" - -msgid "After you make these changes, you may commit the updated configuration." -msgstr "これらの変更実行後、更新した設定を範囲する必要があるかもしれません。" - -msgid "" -"After you set up your Pacemaker cluster, you should set a few basic cluster " -"properties:" -msgstr "" -"Pacemaker クラスターのセットアップ後、いくつかの基本的なクラスターのプロパ" -"ティーを設定すべきです。" - -msgid "All routers are highly available by default." -msgstr "すべてのルーターは、デフォルトで高可用性になっています。" - -msgid "" -"Almost all services in this stack benefit from being proxied. Using a proxy " -"server provides:" -msgstr "" -"このスタックのほぼすべてのサービスは、プロキシーする恩恵を受けられます。プロ" -"キシーサーバを使用することにより、以下が提供されます。" - -msgid "" -"Alternatively, if the database server is running, use the " -"``wsrep_last_committed`` status variable:" -msgstr "" -"代わりに、データベースサーバーが動作している場合、 ``wsrep_last_committed`` " -"状態変数を使用します。" - -msgid "" -"Alternatively, instead of using systemd agents, download and install the OCF " -"resource agent:" -msgstr "" -"または、systemd エージェントを使用する代わりに、OCF リソースエージェントをダ" -"ウンロードしてインストールします。" - -msgid "" -"An AMQP (Advanced Message Queuing Protocol) compliant message bus is " -"required for most OpenStack components in order to coordinate the execution " -"of jobs entered into the system." -msgstr "" -"AMQP (Advanced Message Queuing Protocol) 互換メッセージバスが、システム内の" -"ジョブ実行を調整するために、ほとんどの OpenStack コンポーネントに必要となりま" -"す。" - -msgid "An OpenStack environment includes multiple data pools for the VMs:" -msgstr "OpenStack 環境は、仮想マシン向けの複数のデータプールがあります。" - -msgid "" -"And the quorum could also have been set to three, just as a configuration " -"example." -msgstr "また、クォーラムが、設定例にあるように 3 つに設定されているでしょう。" - -msgid "AppArmor" -msgstr "AppArmor" - -msgid "AppArmor now permits Galera Cluster to operate." -msgstr "AppArmor により Galera Cluster の動作を許可されます。" - -msgid "" -"Application Armor is a kernel module for improving security on Linux " -"operating systems. It is developed by Canonical and commonly used on Ubuntu-" -"based distributions. In the context of Galera Cluster, systems with AppArmor " -"may block the database service from operating normally." 
-msgstr "" -"Application Armor は、Linux オペレーティングシステムにおいてセキュリティーを" -"向上するためのカーネルモジュールです。Canonical により開発され、一般的に " -"Ubuntu 系のディストリビューションにおいて使用されています。Galera Cluster の" -"観点では、AppArmor を有効化したシステムは、データベースサービスが正常に動作す" -"ることを妨げる可能性があります。" - -msgid "Applications and automatic service migration" -msgstr "アプリケーションおよびサービスの自動的なマイグレーション" - -msgid "Architecture limitations" -msgstr "アーキテクチャーの制限" - -msgid "" -"As another option to make RabbitMQ highly available, RabbitMQ contains the " -"OCF scripts for the Pacemaker cluster resource agents since version 3.5.7. " -"It provides the active/active RabbitMQ cluster with mirrored queues. For " -"more information, see `Auto-configuration of a cluster with a Pacemaker " -"`_." -msgstr "" -"RabbitMQ を高可用化する別の選択肢として、RabbitMQ バージョン 3.5.7 以降、" -"Pacemaker クラスターリソースエージェント向けの OCF スクリプトが含まれます。ア" -"クティブ/アクティブ RabbitMQ クラスターにミラーキューを提供します。詳細は " -"`Auto-configuration of a cluster with a Pacemaker `_ を参照してください。" - -msgid "" -"At its core, a cluster is a distributed finite state machine capable of co-" -"ordinating the startup and recovery of inter-related services across a set " -"of machines." -msgstr "" -"クラスターは、その中心において、複数のセットのマシン間で関連するサービスのス" -"タートアップとリカバリーを調整する機能を持つ、分散有限状態マシンです。" - -msgid "Automated recovery of failed instances" -msgstr "障害インスタンスの自動復旧" - -msgid "Awareness of instances on other machines" -msgstr "他のマシンにあるインスタンスの把握" - -msgid "Awareness of other applications in the stack" -msgstr "スタックにある他のアプリケーションの認識" - -msgid "" -"Bear in mind that the Percona repository only supports Red Hat Enterprise " -"Linux and CentOS distributions." -msgstr "" -"Percona リポジトリーは Red Hat Enterprise Linux と CentOS のみをサポートする" -"ことを心にとどめておいてください。" - -msgid "" -"Bear in mind, leaving SELinux in permissive mode is not a good security " -"practice. Over the longer term, you need to develop a security policy for " -"Galera Cluster and then switch SELinux back into enforcing mode." -msgstr "" -"SELinux を permissive モードにすることは、良いセキュリティー慣行ではないこと" -"を覚えておいてください。長い間、Galera Cluster のセキュリティーポリシーを開発" -"して、SELinux を enforcing モードに切り替える必要があります。" - -msgid "" -"Bear in mind, while setting this parameter to ``1`` or ``2`` can improve " -"performance, it introduces certain dangers. Operating system failures can " -"erase the last second of transactions. While you can recover this data from " -"another node, if the cluster goes down at the same time (in the event of a " -"data center power outage), you lose this data permanently." -msgstr "" -"このパラメーターを ``1`` か ``2`` に設定することにより、性能を改善できます" -"が、ある種の危険性があることを覚えておいてください。オペレーティングシステム" -"の障害が、最後の数秒のトランザクションを消去する可能性があります。このデータ" -"を他のノードから復旧することもできますが、クラスターが同時に停止した場合 " -"(データセンターの電源障害時)、このデータを完全に失います。" - -msgid "Before you attempt this, verify that you have the following ready:" -msgstr "これを試す前に、以下の準備ができていることを確認します。" - -msgid "" -"Before you launch Galera Cluster, you need to configure the server and the " -"database to operate as part of the cluster." -msgstr "" -"Galera クラスターを起動する前に、クラスターの一部として動作するよう、サーバー" -"とデータベースを設定する必要があります。" - -msgid "" -"Block Storage service (cinder) can use LVM or Ceph RBD as the storage back " -"end." -msgstr "" -"Block Storage サービス (cinder) は、ストレージバックエンドとして LVM や Ceph " -"RBD を使用できます。" - -msgid "" -"Both the central and the compute agent can run in an HA deployment, which " -"means that multiple instances of these services can run in parallel with " -"workload partitioning among these running instances." 
-msgstr "" -"中央エージェントとコンピュートエージェントの両方は、高可用性で動作できます。" -"これらのサービスの複数のインスタンスが、これらを実行しているインスタンス間で" -"並行して負荷分散できることを意味します。" - -msgid "" -"Built-in application clustering, when available from the application. Galera " -"is one example of this." -msgstr "" -"アプリケーション組み込みクラスター、アプリケーションから利用できる場合、" -"Galera がこの例になる。" - -msgid "" -"By default, `controller1` handles the caching service but, if the host goes " -"down, `controller2` does the job. For more information about memcached " -"installation, see the `OpenStack Cloud Administrator Guide `_." -msgstr "" -"デフォルトで、 `controller1` がキャッシュサービスを処理しますが、ホストが停止" -"している場合、 `controller2` がジョブを実行します。memcached のインストールの" -"詳細は `OpenStack Cloud Administrator Guide `_ を参照してください。" - -msgid "" -"By default, cluster nodes do not start as part of a Primary Component. " -"Instead they assume that one exists somewhere and attempts to establish a " -"connection with it. To create a Primary Component, you must start one " -"cluster node using the ``--wsrep-new-cluster`` option. You can do this using " -"any cluster node, it is not important which you choose. In the Primary " -"Component, replication and state transfers bring all databases to the same " -"state." -msgstr "" -"クラスターノードは、デフォルトで Primary Component の一部として起動しません。" -"代わりに、それがどこかに存在すると仮定し、そこへの接続を確立しようとします。" -"1 つのクラスターノードを ``--wsrep-new-cluster``オプションを付けて起動して、" -"Primary Component を作成する必要があります。任意のクラスターノードを使用して" -"実行でき、どれを選択するかは重要ではありません。Primary Component において、" -"レプリケーションと状態転送により、すべてのデータベースが同じ状態になります。" - -msgid "Ceph" -msgstr "Ceph" - -msgid "" -"Ceph RBD provides object replication capabilities by storing Block Storage " -"volumes as Ceph RBD objects; Ceph RBD ensures that each replica of an object " -"is stored on a different node. This means that your volumes are protected " -"against hard drive and node failures or even the failure of the data center " -"itself." -msgstr "" -"Ceph RBD は、Ceph RBD オブジェクトとして Block Storage のボリュームを保存する" -"ことにより、オブジェクトレプリケーション機能を提供します。オブジェクトの各レ" -"プリカが別々のノードに保存されることを保証します。このことは、お使いのボ" -"リュームがハードディスクやノードの障害時、データセンター自体の障害時にも保護" -"されることを意味します。" - -msgid "" -"Certain services running on the underlying operating system of your " -"OpenStack database may block Galera Cluster from normal operation or prevent " -"``mysqld`` from achieving network connectivity with the cluster." -msgstr "" -"OpenStack データベースのベースとなるオペレーティングシステムで動作している特" -"定のサービスは、Galera Cluster が通常の動作をブロックしたり、``mysqld`` がク" -"ラスターとのネットワーク接続を妨害したりする可能性があります。" - -msgid "Change the number of expected votes for a cluster to be quorate" -msgstr "クラスターが定数になるために期待されるボート数を変更します" - -msgid "Change the number of votes assigned to a node" -msgstr "ノードに割り当てられたボート数を変更します" - -msgid "" -"Cinder provides 'block storage as a service' suitable for performance " -"sensitive scenarios such as databases, expandable file systems, or providing " -"a server with access to raw block level storage." 
-msgstr "" -"Cinder は、データベースなどの性能を必要とするシナリオ、拡張可能なファイルシス" -"テム、ローブロックレベルストレージにアクセスするサーバーに適するサービスとし" -"て「block storage as a service」を提供します。" - -msgid "Cinder-volume as a single point of failure" -msgstr "単一障害点としての cinder-volume" - -msgid "Clusters and quorums" -msgstr "クラスターとクォーラム" - -msgid "Collapsed" -msgstr "Collapsed" - -# #-#-#-#-# compute-manage-volumes.pot (Cloud Administrator Guide 0.9) -# #-#-#-#-# -# #-#-#-#-# networking_adv-features.pot (Cloud Administrator Guide 0.9) -# #-#-#-#-# -# #-#-#-#-# networking_config-agents.pot (Cloud Administrator Guide 0.9) -# #-#-#-#-# -# #-#-#-#-# networking_use.pot (Cloud Administrator Guide 0.9) #-#-#-#-# -msgid "Command" -msgstr "コマンド" - -msgid "Common deployement architectures" -msgstr "一般的な配備のアーキテクチャー" - -msgid "Configuration" -msgstr "設定" - -msgid "Configuration tips" -msgstr "設定のヒント" - -msgid "Configure Block Storage API service" -msgstr "Block Storage API サービスの設定" - -msgid "Configure NTP" -msgstr "NTP の設定" - -msgid "Configure OpenStack Identity service" -msgstr "OpenStack Identity Service の設定" - -msgid "Configure OpenStack Image service API" -msgstr "OpenStack Image サービス API の設定" - -msgid "Configure OpenStack services to use HA Shared File Systems API" -msgstr "" -"高可用性 Shared File Systems API を使用するための OpenStack サービスの設定" - -msgid "Configure OpenStack services to use Rabbit HA queues" -msgstr "RabbitMQ HA キューを使用するための OpenStack サービスの設定" - -msgid "Configure OpenStack services to use highly available Block Storage API" -msgstr "高可用性 Block Storage API を使用するための OpenStack サービスの設定" - -msgid "" -"Configure OpenStack services to use highly available OpenStack Image API" -msgstr "" -"高可用性 OpenStack Image Service API を使用するための OpenStack サービスの設" -"定" - -msgid "" -"Configure OpenStack services to use the highly available OpenStack Identity" -msgstr "高可用性 OpenStack Identity を使用するための OpenStack サービスの設定" - -msgid "Configure RabbitMQ for HA queues" -msgstr "高可用性 キュー用の RabbitMQ の設定" - -msgid "Configure Shared File Systems API service" -msgstr "Shared File Systems API サービスの設定" - -msgid "Configure high availability on compute nodes" -msgstr "コンピュートノードにおける高可用性の設定" - -msgid "" -"Configure networking on each node. The `Networking `_ section of " -"the *Install Guide* includes basic information about configuring networking." -msgstr "" -"各ノードにおいてネットワークを設定します。ネットワーク設定に関する基本的な情" -"報は、インストールガイドの `Networking `_ セクションにあります。" - -msgid "Configure the VIP" -msgstr "仮想 IP の設定" - -msgid "Configuring Block Storage to listen on the VIP address" -msgstr "Block Storage がその仮想 IP アドレスをリッスンする設定" - -msgid "Configuring HAProxy" -msgstr "HAProxy の設定" - -msgid "Configuring InnoDB" -msgstr "InnoDB の設定" - -msgid "Configuring OpenStack services to use this IP address" -msgstr "OpenStack のサービスがこの IP アドレスを使用する設定" - -msgid "" -"Configuring RAID on the hard drives that implement storage protects your " -"data against a hard drive failure. If, however, the node itself fails, data " -"may be lost. In particular, all volumes stored on an LVM node can be lost." 
-msgstr "" -"ストレージを実装するハードディスクに RAID を設定することにより、ハードディス" -"ク障害からデータを保護します。しかしながら、ノード自体が故障した場合、データ" -"が失われるかもしれません。とくに、LVM ノードに保存されている全ボリュームは失" -"われる可能性があります。" - -msgid "Configuring Storage for high availability" -msgstr "ストレージの高可用性の設定" - -msgid "Configuring ``mysqld``" -msgstr "``mysqld`` の設定" - -msgid "Configuring non-core components for high availability" -msgstr "非コアコンポーネントの高可用性の設定" - -msgid "Configuring the compute node for high availability" -msgstr "コンピュートノードの高可用性の設定" - -msgid "Configuring the controller for high availability" -msgstr "コントローラーの高可用性の設定" - -msgid "Configuring the server" -msgstr "サーバーの設定" - -msgid "Configuring wsrep replication" -msgstr "wsrep レプリケーションの設定" - -msgid "" -"Connect an additional quorum device to allow small clusters remain quorate " -"during node outages" -msgstr "" -"追加のクォーラムデバイスを接続して、小規模なクラスターがノード障害時にクォー" -"ラムを取得できるようにします。" - -msgid "Contents" -msgstr "内容" - -msgid "" -"Corosync can be configured to work with either multicast or unicast IP " -"addresses or to use the votequorum library." -msgstr "" -"Corosync を動作させるための設定としては、マルチキャスト IP アドレスを使う、ユ" -"ニキャスト IP アドレスを使う、 votequorum ライブラリーを使う、の選択肢があり" -"ます。" - -msgid "" -"Corosync is started as a regular system service. Depending on your " -"distribution, it may ship with an LSB init script, an upstart job, or a " -"systemd unit file. Either way, the service is usually named corosync:" -msgstr "" -"Corosync は通常のシステムサービスとして起動します。お使いのディストリビュー" -"ションに応じて、LSB init スクリプト、upstart ジョブ、systemd ユニットファイル" -"を同梱しているかもしれません。どちらにしても、サービスは通常 corosync という" -"名前です。" - -msgid "" -"Create a ``Galera.repo`` file in the local directory. For Galera Cluster for " -"MySQL, use the following content:" -msgstr "" -"ローカルのディレクトリーに ``Galera.repo`` ファイルを作成します。Galera " -"Cluster for MySQL の場合、以下の内容を使用します。" - -msgid "" -"Create a configuration file for ``clustercheck`` at ``/etc/sysconfig/" -"clustercheck``:" -msgstr "" -"``clustercheck`` の設定ファイルを ``/etc/sysconfig/clustercheck`` に作成しま" -"す。" - -msgid "" -"Create a configuration file for the HAProxy monitor service, at ``/etc/" -"xinetd.d/galera-monitor``:" -msgstr "" -"HAProxy モニターサービスの設定ファイルを ``/etc/xinetd.d/galera-monitor`` に" -"作成します。" - -msgid "" -"Create a symbolic link for the database server in the ``disable`` directory:" -msgstr "" -"``disable`` ディレクトリーにデータベースサーバーへのシンボリックリンクを作成" -"します。" - -msgid "Create the cluster, giving it a name, and start it:" -msgstr "名前を指定してクラスターを作成し、起動します。" - -msgid "" -"Currently, no native feature is provided to make the LBaaS agent highly " -"available using the default plug-in HAProxy. A common way to make HAProxy " -"highly available is to use the VRRP (Virtual Router Redundancy Protocol). " -"Unfortunately, this is not yet implemented in the LBaaS HAProxy plug-in." -msgstr "" -"現在、デフォルトのプラグイン HAProxy を使用して、LBaaS エージェントを高可用化" -"する組み込み機能はありません。HAProxy を高可用化する一般的な方法は、VRRP " -"(Virtual Router Redundancy Protocol) を使用することです。残念ながら、これはま" -"だ LBaaS HAProxy プラグインに実装されていません。" - -msgid "" -"Data integrity through fencing (a non-responsive process does not imply it " -"is not doing anything)" -msgstr "" -"フェンシングによるデータ完全性 (応答なしプロセスが何もしていないことを意味し" -"ます)" - -msgid "Database" -msgstr "データベース" - -msgid "Database (Galera Cluster)" -msgstr "データベース (Galera Cluster)" - -msgid "Database configuration" -msgstr "データベース設定" - -msgid "" -"Database hosts with Galera Cluster installed. 
You need a minimum of three " -"hosts;" -msgstr "" -"Galera Cluster クラスターがインストールされたデータベースホスト。少なくとも " -"3 つのホストが必要です。" - -msgid "Debian" -msgstr "Debian" - -msgid "" -"Define the InnoDB memory buffer pool size. The default value is 128 MB, but " -"to compensate for Galera Cluster's additional memory usage, scale your usual " -"value back by 5%:" -msgstr "" -"InnoDB メモリーバッファープールサイズを定義します。デフォルト値は 128 MB です" -"が、Galera Cluster の追加メモリー使用状況に対して補うために、通常の値を 5% ま" -"でスケールさせてください。" - -msgid "" -"Depending on the method used to communicate with the service, the following " -"availability strategies will be followed:" -msgstr "" -"サービスが通信するために使用するメソッドに応じて、以下の可用性の戦略に従いま" -"す。" - -msgid "Deployment flavors" -msgstr "デプロイフレーバー" - -msgid "Deployment strategies" -msgstr "デプロイ戦略" - -msgid "Description" -msgstr "説明" - -msgid "Distribution" -msgstr "ディストリビューション" - -msgid "" -"Do not change this value. Other modes may cause ``INSERT`` statements on " -"tables with auto-increment columns to fail as well as unresolved deadlocks " -"that leave the system unresponsive." -msgstr "" -"この値を変更してはいけません。他のモジュールが、自動インクリメントの列を用い" -"てテーブルに ``INSERT`` ステートメントを発行するかもしれません。これは、シス" -"テムが応答不可になる解決不能なデッドロックに陥ります。" - -msgid "Do this configuration on all services using RabbitMQ:" -msgstr "RabbitMQ を使用するすべてのサービスでこの設定を行います。" - -msgid "" -"Each configured interface must have a unique ``ringnumber``, starting with 0." -msgstr "" -"設定済みの各インターフェースは、0 から始まる一意な ``ringnumber`` を持つ必要" -"があります。" - -msgid "Each instance has its own IP address;" -msgstr "各インスタンスは、自身の IP アドレスを持ちます。" - -msgid "" -"Each instance of HAProxy configures its front end to accept connections only " -"from the virtual IP (VIP) address and to terminate them as a list of all " -"instances of the corresponding service under load balancing, such as any " -"OpenStack API service." -msgstr "" -"HAProxy の各インスタンスは、仮想 IP アドレスからの接続のみを受け付け、" -"OpenStack API サービスなど、負荷分散するサービスの全インスタンスの一覧に振り" -"分けるよう、そのフロントエンドを設定します。" - -msgid "" -"Each service also has a backup but manages both the main and redundant " -"systems concurrently. This way, if there is a failure, the user is unlikely " -"to notice. The backup system is already online and takes on increased load " -"while the main system is fixed and brought back online." -msgstr "" -"各サービスはバックアップも持ちますが、メインと冗長システムを同時に管理しま" -"す。このように、ユーザーが気が付かない障害が発生した場合、バックアップシステ" -"ムはすでにオンラインであり、メインシステムが復旧され、オンラインになるまでの" -"間は負荷が高くなります。" - -msgid "" -"Edit the :file:`/etc/glance/glance-api.conf` file to configure the OpenStack " -"image service:" -msgstr "" -":file:`/etc/glance/glance-api.conf` ファイルを編集して、OpenStack Image サー" -"ビスを設定します。" - -msgid "Edit the :file:`/etc/manila/manila.conf` file:" -msgstr "`/etc/manila/manila.conf` ファイルを編集します。" - -msgid "" -"Edit the :file:`keystone.conf` file to change the values of the :manpage:" -"`bind(2)` parameters:" -msgstr "" -":file:`keystone.conf` ファイルを編集して、 :manpage:`bind(2)` パラメーターの" -"値を変更します。" - -msgid "Edit the ``/etc/cinder/cinder.conf`` file:" -msgstr "``/etc/cinder/cinder.conf`` ファイルを編集します。" - -msgid "Enabling the repository" -msgstr "リポジトリーの有効化" - -msgid "Enhanced failure detection" -msgstr "高度な障害検出" - -msgid "" -"Ensure that the InnoDB locking mode for generating auto-increment values is " -"set to ``2``, which is the interleaved locking mode." 
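Taken together, the InnoDB and replication requirements discussed here might appear in :file:`my.cnf` roughly as follows; this is a sketch of the settings named in the text, not a complete configuration::

    [mysqld]
    # Row-level replication and InnoDB are required by Galera Cluster
    binlog_format = ROW
    default_storage_engine = InnoDB
    # Interleaved locking mode for auto-increment values
    innodb_autoinc_lock_mode = 2
    # Flush the log buffer once per second rather than on each commit
    # (see the durability caveats discussed above)
    innodb_flush_log_at_trx_commit = 0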
-msgstr "" -"自動インクリメント値を生成するための InnoDB ロックモードがをきちんと``2`` に" -"設定してください。これは、インターリーブ・ロックモードです。" - -msgid "" -"Ensure that the InnoDB log buffer is written to file once per second, rather " -"than on each commit, to improve performance:" -msgstr "" -"パフォーマンスを改善するために、InnoDB ログバッファーが、コミットごとではな" -"く、1 秒ごとにファイルに書き込むことを確認します。" - -msgid "" -"Ensure that the binary log format is set to use row-level replication, as " -"opposed to statement-level replication:" -msgstr "" -"バイナリーログ形式が、ステートメントレベルのレプリケーションではなく、行レベ" -"ルのレプリケーションに設定されていることを確認してください。" - -msgid "" -"Ensure that the database server is not bound only to to the localhost, " -"``127.0.0.1``. Instead, bind it to ``0.0.0.0`` to ensure it listens on all " -"available interfaces." -msgstr "" -"データベースサーバーが localhost や ``127.0.0.1`` のみにバインドされていない" -"ことを確認してください。代わりに、すべてのインターフェースをきちんとリッスン" -"するよう、 ``0.0.0.0`` にバインドしてください。" - -msgid "Ensure that the default storage engine is set to InnoDB:" -msgstr "デフォルトのストレージエンジンをきちんと InnoDB に設定してください。" - -msgid "" -"Ephemeral storage is allocated for an instance and is deleted when the " -"instance is deleted. The Compute service manages ephemeral storage. By " -"default, Compute stores ephemeral drives as files on local disks on the " -"Compute node but Ceph RBD can instead be used as the storage back end for " -"ephemeral storage." -msgstr "" -"一時ストレージは、インスタンスのために割り当てられ、インスタンスの削除時に削" -"除されます。Compute サービスが一時ストレージを管理します。Compute はデフォル" -"トで、コンピュートノードのローカルディスクにファイルとして一時ディスクを保存" -"します。代わりに、Ceph RBD が一時ストレージのストレージバックエンドとして使用" -"できます。" - -msgid "Example Config File" -msgstr "サンプル設定ファイル" - -msgid "Example configuration with two hosts:" -msgstr "2 ホストでの設定例" - -msgid "Facility services such as power, air conditioning, and fire protection" -msgstr "電源、空調、防火などに関する設備" - -msgid "Firewall" -msgstr "ファイアウォール" - -msgid "" -"For Debian and Debian-based distributions, such as Ubuntu, complete the " -"following steps:" -msgstr "" -"Debian および、Ubuntu などの Debian 系のディストリビューションは、以下の手順" -"を実行してください。" - -msgid "" -"For Debian and Debian-based distributions, such as Ubuntu, run the following " -"command:" -msgstr "" -"Debian および、Ubuntu などの Debian 系のディストリビューションは、以下のコマ" -"ンドを実行してください。" - -msgid "" -"For Galera Cluster for MySQL, using your preferred text editor, create a " -"``Galera.repo`` file in the ``/etc/yum.repos.d/`` directory." -msgstr "" -"Galera Cluster for MySQL の場合、お好きなテキストエディターを使用して、 ``/" -"etc/yum.repos.d/`` ディレクトリーに ``Galera.repo`` ファイルを作成します。" - -msgid "For Kilo and beyond, focus on L3HA and DVR." -msgstr "Kilo 以降、L3HA と DVR に注力します。" - -msgid "" -"For Liberty, we do not have the standalone network nodes in general. We " -"usually run the Networking services on the controller nodes. In this guide, " -"we use the term \"network nodes\" for convenience." -msgstr "" -"Liberty の場合、独立したネットワークノードを一般的に持ちません。よくコント" -"ローラーノードにおいて Networking サービスを実行します。このガイドでは、便宜" -"上、「ネットワークノード」という言葉を使用します。" - -msgid "For MariaDB Galera Cluster, instead use this content:" -msgstr "MariaDB Galera Cluster の場合、代わりに以下の内容を使用します。" - -msgid "" -"For MariaDB Galera Cluster, using your preferred text editor, create a " -"``Galera.repo`` file in the ``/etc/yum.repos.d/`` directory." 
-msgstr "" -"MariaDB Galera Cluster の場合、お好きなテキストエディターを使用して、 ``/etc/" -"yum.repos.d/`` ディレクトリーに ``Galera.repo`` ファイルを作成します。" - -msgid "" -"For OpenStack Compute, for example, if your OpenStack Identiy service IP " -"address is 10.0.0.11, use the following configuration in your :file:`api-" -"paste.ini` file:" -msgstr "" -"例えば、OpenStack Compute の場合、OpenStack Image API サービスの IP アドレス" -"が 10.0.0.11 ならば、以下の設定を :file:`api-paste.ini` ファイルに使用しま" -"す。" - -msgid "" -"For OpenStack Compute, for example, if your OpenStack Image API service IP " -"address is 10.0.0.11 (as in the configuration explained here), you would use " -"the following configuration in your :file:`nova.conf` file:" -msgstr "" -"例えば、OpenStack Compute の場合、OpenStack Image API サービスの IP アドレス" -"が (ここで説明されている設定のように) 10.0.0.11 ならば、以下の設定を :file:" -"`nova.conf` ファイルに使用します。" - -msgid "For Percona XtraDB Cluster, run the following command:" -msgstr "Percona XtraDB Cluster の場合、以下のコマンドを実行します。" - -msgid "" -"For Red Hat Enterprise Linux and Red Hat-based Linux distributions, the " -"process is more straightforward. In this file, only enter the text for the " -"repository you want to use." -msgstr "" -"Red Hat Enterprise Linux および Red Hat 系のディストリビューションは、手順は" -"もっとシンプルです。このファイルに、使用したいリポジトリーのテキストを入力す" -"るだけです。" - -msgid "" -"For Red Hat Enterprise Linux and Red Hat-based distributions, such as Fedora " -"or CentOS, instead run this command:" -msgstr "" -"Red Hat Enterprise Linux および Fedora や CentOS などの Red Hat 系ディストリ" -"ビューションの場合、このコマンドを代わりに実行してください。" - -msgid "" -"For SLES 12, the packages are signed by GPG key 893A90DAD85F9316. You should " -"verify the fingerprint of the imported GPG key before using it." -msgstr "" -"SLES 12 の場合、パッケージは GPG キー 893A90DAD85F9316 により署名されていま" -"す。使用する前に、インポートした GPG キーのフィンガープリントを検証すべきで" -"す。" - -msgid "" -"For SUSE Enterprise Linux Server and SUSE-based distributions, such as " -"openSUSE, instead run this command:" -msgstr "" -"SUSE Enterprise Linux Server および openSUSE などの SUSE 系ディストリビュー" -"ションの場合、このコマンドを代わりに実行してください。" - -msgid "" -"For SUSE Enterprise Linux and SUSE-based distributions, such as openSUSE " -"binary installations are only available for Galera Cluster for MySQL and " -"MariaDB Galera Cluster." -msgstr "" -"SUSE Enterprise Linux や openSUSE などの SUSE 系ディストリビューションのバイ" -"ナリーインストールの場合、Galera Cluster for MySQL と MariaDB Galera Cluster " -"のみ利用可能です。" - -msgid "" -"For UDPU, every node that should be a member of the membership must be " -"specified." -msgstr "" -"UDPUでは、全てのノードがメンバーシップメンバーを指定しなければなりません。" - -msgid "" -"For Ubuntu, you should also enable the Corosync service in the ``/etc/" -"default/corosync`` configuration file." -msgstr "" -"Ubuntu の場合、 ``/etc/default/corosync`` 設定ファイルにおいて Corosync サー" -"ビスも有効化すべきです。" - -msgid "For ``crmsh``:" -msgstr "``crmsh`` の場合:" - -msgid "For ``pcs``:" -msgstr "``pcs`` の場合:" - -msgid "" -"For a complete list of the available parameters, run the ``SHOW VARIABLES`` " -"command from within the database client:" -msgstr "" -"利用できるパラメーターの一覧は、データベースクライアントから ``SHOW " -"VARIABLES`` コマンドを実行してください。" - -msgid "" -"For backward compatibility and supporting existing deployments, the central " -"agent configuration also supports using different configuration files for " -"groups of service instances of this type that are running in parallel. For " -"enabling this configuration, set a value for the partitioning_group_prefix " -"option in the `central section `__ in the " -"OpenStack Configuration Reference." 
-msgstr "" -"既存の環境の後方互換性とサポートのために、中央エージェントの設定は、並列で実" -"行しているこの種のサービスインスタンスのグループのために、別の設定ファイルを" -"使用することもサポートされます。この設定を有効化するために、OpenStack " -"Configuration Reference の `central section `__ にある partitioning_group_prefix オプションの値を設定します。" - -msgid "" -"For demonstrations and studying, you can set up a test environment on " -"virtual machines (VMs). This has the following benefits:" -msgstr "" -"デモや学習の場合、仮想マシンにテスト環境をセットアップできます。これには以下" -"の利点があります。" - -msgid "" -"For detailed instructions about installing HAProxy on your nodes, see its " -"`official documentation `_." -msgstr "" -"お使いのノードに HAProxy をインストールする方法の詳細は `公式ドキュメント " -"`_ を参照してください。" - -msgid "" -"For each cluster node, run the following commands, replacing ``NODE-IP-" -"ADDRESS`` with the IP address of the cluster node you want to open the " -"firewall to:" -msgstr "" -"各クラスターノード向けに、以下のコマンドを実行します。``NODE-IP-ADDRESS`` を" -"ファイアウォールを開きたいクラスターノードの IP アドレスで置き換えます。" - -msgid "" -"For each entry: Replace all instances of ``DISTRO`` with the distribution " -"that you use, such as ``debian`` or ``ubuntu``. Replace all instances of " -"``RELEASE`` with the release of that distribution, such as ``wheezy`` or " -"``trusty``. Replace all instances of ``VERSION`` with the version of the " -"database server that you want to install, such as ``5.6`` or ``10.0``." -msgstr "" -"各項目に対して、すべての ``DISTRO`` をお使いのディストリビューション " -"``debian`` や ``ubuntu`` などに置き換えます。すべての ``RELEASE`` をディスト" -"リビューションのリリース名 ``wheezy`` や ``trusty`` に置き換えます。すべての " -"``VERSION`` をインストールしたいデータベースサーバーのバージョン ``5.6`` や " -"``10.0`` などに置き換えます。" - -msgid "" -"For each instance of OpenStack database in your cluster, run the following " -"commands, replacing ``NODE-IP-ADDRESS`` with the IP address of the cluster " -"node you want to open the firewall to:" -msgstr "" -"クラスターにある OpenStack データベースの各インスタンス向けに、以下のコマンド" -"を実行します。``NODE-IP-ADDRESS`` をファイアウォールを開きたいクラスターノー" -"ドの IP アドレスで置き換えます。" - -msgid "" -"For environments that do not support multicast, Corosync should be " -"configured for unicast. An example fragment of the :file:`corosync.conf` " -"file for unicastis shown below:" -msgstr "" -"マルチキャストをサポートしていない場合、Corosync はユニキャストで設定すべきで" -"す。ユニキャスト向け :file:`corosync.conf` ファイルの設定例を以下に示します。" - -msgid "" -"For firewall configurations, note that Corosync communicates over UDP only, " -"and uses ``mcastport`` (for receives) and ``mcastport - 1`` (for sends)." -msgstr "" -"ファイアウォール設定に向け、Corosync は UDP のみで通信して、 ``mcastport`` " -"(受信用) と ``mcastport - 1`` (送信用) を使用することに注意してください。" - -msgid "" -"For information about the required configuration options that have to be set " -"in the :file:`ceilometer.conf` configuration file for both the central and " -"compute agents, see the `coordination section `__ in the OpenStack Configuration Reference." -msgstr "" -"中央エージェントとコンピュートエージェントの両方の :file:`ceilometer.conf` 設" -"定ファイルに設定する必要があるオプションの詳細は、OpenStack Configuration " -"Reference の `coordination section `__ を参照してくだ" -"さい。" - -msgid "" -"For many Linux distributions, you can configure the firewall using the " -"``firewall-cmd`` utility for FirewallD. To do so, complete the following " -"steps on each cluster node:" -msgstr "" -"多くの Linux ディストリビューションの場合、FirewallD 向けの ``firewall-cmd`` " -"ユーティリティーを使用して、ファイアウォールを設定できます。そうするために、" -"各クラスターノードに以下の手順を実行します。" - -msgid "" -"For many Linux distributions, you can configure the firewall using the " -"``iptables`` utility. 
To do so, complete the following steps:" -msgstr "" -"多くの Linux ディストリビューションの場合、``iptables`` ユーティリティーを使" -"用してファイアウォールを設定できます。そのために、以下の手順を実行します。" - -msgid "" -"For more information about configuring storage back ends for the different " -"storage options, see the `Cloud Administrator Guide `_." -msgstr "" -"さまざまなストレージの選択肢に対して、ストレージバックエンドを設定する方法の" -"詳細は、 `Cloud Administrator Guide `_ を参照してください。" - -msgid "" -"For more information on configuring SELinux to work with Galera Cluster, see " -"the `Documentation `_" -msgstr "" -"Galera Cluster と動作する SELinux を設定する方法の詳細は `ドキュメント " -"`_ を参照してく" -"ださい。" - -msgid "" -"For more information on firewalls, see `Firewalls and default ports `_, in the Configuration Reference." -msgstr "" -"ファイアウォールの詳細は、Configuration Reference の `Firewalls and default " -"ports `_ を参照してください。" - -msgid "" -"For more information, see the official installation manual for the " -"distribution:" -msgstr "" -"詳細はディストリビューションの公式インストールガイドを参照してください。" - -msgid "For servers that use ``systemd``, instead run these commands:" -msgstr "" -"``systemd`` を使用するサーバーの場合、これらのコマンドを代わりに実行します。" - -msgid "For servers that use ``systemd``, instead run this command:" -msgstr "" -"``systemd`` を使用するサーバーの場合、代わりにこのコマンドを実行します。" - -msgid "" -"For servers that use ``systemd``, you need to save the current packet " -"filtering to the path of the file that ``iptables`` reads when it starts. " -"This path can vary by distribution, but common locations are in the ``/etc`` " -"directory, such as:" -msgstr "" -"``systemd`` を使用するサーバーの場合、現在のパケットフィルタリングの内容を、 " -"``iptables`` が起動時に参照するファイルに保存する必要があります。このパスは、" -"ディストリビューションにより異なりますが、次のように、一般的に ``/etc`` ディ" -"レクトリーにあります。" - -msgid "" -"For the documentation of these parameters, wsrep Provider option and status " -"variables available in Galera Cluster, see `Reference `_." -msgstr "" -"Galera Cluster において利用できる、これらのパラメーター、wsrep プロバイダーオ" -"プション、状態変数のドキュメントは、`Reference `_ を参照してください。" - -msgid "" -"For this reason, the use of a cluster manager like `Pacemaker `_ is highly recommended." -msgstr "" -"この理由により、 `Pacemaker `_ のようなクラスターマ" -"ネージャーの利用が強く推奨されます。" - -msgid "" -"Galera Cluster configuration parameters all have the ``wsrep_`` prefix. " -"There are five that you must define for each cluster node in your OpenStack " -"database." -msgstr "" -"Galera Cluster の設定パラメーターは、すべて ``wsrep_`` プレフィックスを持ちま" -"す。OpenStack データベースにおいて、各クラスターノード向けに定義する必要があ" -"るものが 5 個あります。" - -msgid "" -"Galera Cluster does not support non-transactional storage engines and " -"requires that you use InnoDB by default. There are some additional " -"parameters that you must define to avoid conflicts." -msgstr "" -"Galera Cluster は、トランザクション未対応ストレージエンジンをサポートしませ" -"ん。デフォルトでは InnoDB を使用する必要があります。競合を避けるために定義す" -"る必要のある追加パラメーターがいくつかあります。" - -msgid "Galera Cluster for MySQL" -msgstr "Galera Cluster for MySQL" - -msgid "Galera Cluster for MySQL:" -msgstr "Galera Cluster for MySQL:" - -msgid "" -"Galera Cluster is not available in the base repositories of Linux " -"distributions. In order to install it with your package manage, you must " -"first enable the repository on your system. The particular methods for doing " -"so vary depending on which distribution you use for OpenStack and which " -"database server you want to use." -msgstr "" -"Galera Cluster は、Linux ディストリビューションの標準リポジトリーにおいて利用" -"できません。パッケージ管理機能を用いてインストールするために、まずお使いのシ" -"ステムにおいてリポジトリーを有効化する必要があります。具体的な手順は、" -"OpenStack のために使用するディストリビューション、使用したいデータベースサー" -"バーによりかなり異なります。" - -msgid "" -"Galera Cluster is now installed on your system. 
You must repeat this process " -"for each controller node in your cluster." -msgstr "" -"これで Galera Cluster がお使いのシステムにインストールされました。クラスター" -"内のすべてのコントローラーに、このプロセスを繰り返す必要があります。" - -msgid "Galera Cluster requires that you open four ports to network traffic:" -msgstr "" -"Galera Cluster は、ネットワーク通信のために 4 つのポートを開く必要がありま" -"す。" - -msgid "Galera can be configured using one of the following strategies:" -msgstr "Galera は、以下の方法のどれかにより設定できます。" - -msgid "Galera runs behind HAProxy." -msgstr "Galera は HAProxy の後ろで動作します" - -msgid "" -"Galera synchronous replication guarantees a zero slave lag. The failover " -"procedure completes once HAProxy detects that the active back end has gone " -"down and switches to the backup one, which is then marked as 'UP'. If no " -"back ends are up (in other words, the Galera cluster is not ready to accept " -"connections), the failover procedure finishes only when the Galera cluster " -"has been successfully reassembled. The SLA is normally no more than 5 " -"minutes." -msgstr "" -"Galera の同期レプリケーションは、スレーブのラグがないことを保証します。フェイ" -"ルオーバー手順は、アクティブなバックエンドがダウンしたことを HAProxy が検知す" -"ると、バックアップに切り替え、「UP」状態になります。バックエンドが UP になら" -"ない場合、つまり Galera クラスターが接続を受け付ける準備ができていない場合、" -"Galera クラスターが再び正常に再構成された場合のみ、フェイルオーバー手順が完了" -"します。SLA は、通常 5 分以内です。" - -msgid "Get a list of nodes known to the quorum service" -msgstr "クォーラムサービスが把握しているノード一覧の取得" - -msgid "HAProxy" -msgstr "HAProxy" - -msgid "" -"HAProxy load balances incoming requests and exposes just one IP address for " -"all the clients." -msgstr "" -"HAProxy は、受信リクエストを負荷分散して、すべてのクライアントに 1 つの IP ア" -"ドレスを公開します。" - -msgid "" -"HAProxy provides a fast and reliable HTTP reverse proxy and load balancer " -"for TCP or HTTP applications. It is particularly suited for web crawling " -"under very high loads while needing persistence or Layer 7 processing. It " -"realistically supports tens of thousands of connections with recent hardware." -msgstr "" -"HAProxy は、TCP や HTTP ベースのアプリケーションに、高速かつ高信頼な HTTP リ" -"バースプロキシーとロードバランサーを提供します。とくに、永続性や L7 処理を必" -"要とする、非常に高負荷な Web サイトに適しています。最近のハードウェアを用いる" -"と、数千の接続を現実的にサポートします。" - -msgid "" -"HAProxy should not be a single point of failure. It is advisable to have " -"multiple HAProxy instances running, where the number of these instances is a " -"small odd number like 3 or 5. You need to ensure its availability by other " -"means, such as Keepalived or Pacemaker." -msgstr "" -"HAProxy は単一障害点になってはいけません。HAProxy のインスタンスは 3台 また" -"は 5台のような奇数の複数台構成にすることを推奨します。Keepalived や " -"Pacemaker などの他の手段により、可用性を保証する必要があります。" - -msgid "Hardware considerations for high availability" -msgstr "高可用性のためのハードウェア考慮事項" - -msgid "Hardware setup" -msgstr "ハードウェアのセットアップ" - -msgid "" -"Here is an example ``/etc/haproxy/haproxy.cfg`` configuration file. You need " -"a copy of it on each controller node." -msgstr "" -"これは ``/etc/haproxy/haproxy.cfg`` 設定ファイルの例です。各コントローラー" -"ノードにコピーする必要があります。" - -msgid "High availability concepts" -msgstr "高可用性の概念" - -msgid "High availability for other components" -msgstr "他のコンポーネントの高可用性" - -msgid "" -"High availability is not for every user. It presents some challenges. High " -"availability may be too complex for databases or systems with large amounts " -"of data. Replication can slow large systems down. Different setups have " -"different prerequisites. Read the guidelines for each setup." 
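To make the HAProxy discussion above concrete, here is a sketch of a Galera front end in :file:`/etc/haproxy/haproxy.cfg`; the server names, addresses, and the port-9200 ``clustercheck`` health check are assumptions drawn from the surrounding text::

    listen galera_cluster
        bind 10.0.0.11:3306
        balance source
        option httpchk
        server controller1 10.0.0.12:3306 check port 9200 inter 2000 rise 2 fall 5
        server controller2 10.0.0.13:3306 backup check port 9200 inter 2000 rise 2 fall 5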
-msgstr "" -"高可用性はあらゆるユーザー向けではありません。いくつかの挑戦を妨害します。高" -"可用性は、大量のデータを持つデータベースやシステムをあまりに複雑にする可能性" -"があります。レプリケーションは大規模システムをスローダウンさせる可能性があり" -"ます。異なるセットアップには、異なる事前要件があります。各セットアップのガイ" -"ドラインを参照してください。" - -msgid "High availability is turned off as the default in OpenStack setups." -msgstr "高可用性は、デフォルトの OpenStack セットアップで無効化されています。" - -msgid "High availability strategies" -msgstr "高可用性の戦略" - -msgid "High availability systems seek to minimize two things:" -msgstr "高可用性システムは、以下の 2 つを最小にすることを目指しています。" - -msgid "" -"High availability systems typically achieve an uptime percentage of 99.99% " -"or more, which roughly equates to less than an hour of cumulative downtime " -"per year. In order to achieve this, high availability systems should keep " -"recovery times after a failure to about one to two minutes, sometimes " -"significantly less." -msgstr "" -"高可用性システムは、一般的に 99.99% 以上の稼働率を達成します。おそよ年間 1 時" -"間未満の停止時間になります。高可用性システムは、これを実現するために、障害発" -"生後の復旧時間を 1 ~ 2 分以内に、ときにはさらに短く抑えるべきです。" - -msgid "Highly available Block Storage API" -msgstr "高可用性 Block Storage API" - -msgid "Highly available OpenStack Image API" -msgstr "高可用性 OpenStack Image API" - -msgid "Highly available Shared File Systems API" -msgstr "高可用性 Shared File Systems API" - -msgid "" -"How frequently to retry connecting with RabbitMQ: [TODO: document the unit " -"of measure here? Seconds?]" -msgstr "" -"How frequently to retry connecting with RabbitMQ: [TODO: document the unit " -"of measure here? Seconds?]" - -msgid "" -"How long to back-off for between retries when connecting to RabbitMQ: [TODO: " -"document the unit of measure here? Seconds?]" -msgstr "" -"How long to back-off for between retries when connecting to RabbitMQ: [TODO: " -"document the unit of measure here? Seconds?]" - -msgid "" -"However, OpenStack does not require a significant amount of resources and " -"the following minimum requirements should support a proof-of-concept high " -"availability environment with core services and several instances:" -msgstr "" -"しかしながら、OpenStack は膨大なリソースを必要としません。以下の最小要件は、" -"コアサービスといくつかのインスタンスを動かす検証 (POC) 環境には対応できること" -"でしょう。" - -msgid "" -"However, running an OpenStack environment on VMs degrades the performance of " -"your instances, particularly if your hypervisor and/or processor lacks " -"support for hardware acceleration of nested VMs." -msgstr "" -"しかしながら、仮想マシン上で OpenStack 環境を実行すると、インスタンスの性能が" -"悪くなります。とくに、ハイパーバイザーとプロセッサーが nested 仮想マシンの" -"ハードウェア支援機能をサポートしない場合は顕著です。" - -msgid "" -"However, the reasons vary and are discussed under each component's heading." -msgstr "" -"しかしながら、理由はさまざまであり、各コンポーネントの項目において議論されま" -"す。" - -msgid "Identity services (keystone)" -msgstr "Identity サービス (keystone)" - -msgid "" -"If the Block Storage service runs on the same nodes as the other services, " -"then it is advisable to also include:" -msgstr "" -"Block Storage サービスが他のサービスと同じノードで実行している場合、以下も含" -"めることを推奨します。" - -msgid "" -"If the ``broadcast`` parameter is set to yes, the broadcast address is used " -"for communication. If this option is set, the ``mcastaddr`` parameter should " -"not be set." -msgstr "" -"``broadcast`` パラメーターが yes に設定されている場合、ブロードキャストアドレ" -"スが通信に使用されます。このオプションが設定されている場合、``mcastaddr`` パ" -"ラメーターは設定すべきではありません。" - -msgid "" -"If the cluster is working, you can create usernames and passwords for the " -"queues." -msgstr "" -"クラスターが動作していると、キューのユーザー名とパスワードを作成できます。" - -msgid "" -"If you are using Corosync version 2 on Ubuntu 14.04, remove or comment out " -"lines under the service stanza, which enables Pacemaker to start up. 
Another " -"potential problem is the boot and shutdown order of Corosync and Pacemaker. " -"To force Pacemaker to start after Corosync and stop before Corosync, fix the " -"start and kill symlinks manually:" -msgstr "" -"Ubuntu 14.04 において Corosync バージョン 2 を使用している場合、サービスの節" -"の下にある行を削除するかコメントアウトします。これにより、Pacemaker が起動で" -"きます。別の潜在的な問題は、Corosync と Pacemaker の起動と停止の順番です。必" -"ず Pacemaker が Corosync の後に起動して、Corosync の前に停止させるために、" -"start と kill のシンボリックリンクを手動で修正します。" - -msgid "" -"If you are using Corosync version 2, use the :command:`corosync-cmapctl` " -"utility instead of :command:`corosync-objctl`; it is a direct replacement." -msgstr "" -"Corosync バージョン 2 を使用している場合、 :command:`corosync-objctl` の代わ" -"りに :command:`corosync-cmapctl` ユーティリティーを使用します。これは、そのま" -"ま置き換えられます。" - -msgid "" -"If you are using both private and public IP addresses, you should create two " -"virtual IP addresses and define your endpoint like this:" -msgstr "" -"プライベート IP とパブリック IP の両方を使用する場合、2 つの仮想 IP アドレス" -"を作成し、次のようにエンドポイントを定義すべきです。" - -msgid "" -"If you are using both private and public IP addresses, you should create two " -"virtual IPs and define your endpoint like this:" -msgstr "" -"プライベート IP アドレスとパブリック IP アドレスの両方を使用する場合、2 つの" -"仮想 IP アドレスを作成し、次のようにエンドポイントを定義すべきです。" - -msgid "" -"If you are using both private and public IP addresses, you should create two " -"virtual IPs and define your endpoints like this:" -msgstr "" -"プライベート IP アドレスとパブリック IP アドレスの両方を使用する場合、2 つの" -"仮想 IP アドレスを作成し、次のようにエンドポイントを定義すべきです。" - -msgid "" -"If you are using the Block Storage service OCF agent, some settings will be " -"filled in for you, resulting in a shorter configuration file:" -msgstr "" -"Block Storage サービス OCF エージェントを使用している場合、いくつかの設定は入" -"力されていて、設定ファイルを短くできます。" - -msgid "" -"If you are using the horizon dashboard, edit the :file:`local_settings.py` " -"file to include the following:" -msgstr "" -"Dashboard を使用している場合、以下の内容を含めた :file:`local_settings.py` " -"ファイルを編集します。" - -msgid "" -"If you change the configuration from an old set-up that did not use HA " -"queues, you should restart the service:" -msgstr "" -"HA キューを使用していない古いセットアップから設定を変更した場合、サービスを再" -"起動しなければいけません。" - -msgid "" -"If you find any or all of these limitations concerning, you are encouraged " -"to refer to the :doc:`Pacemaker HA architecture` " -"instead." -msgstr "" -"これらの制限に関する心配がある場合、代わりに :doc:`Pacemaker HA " -"architecture` を参照することを推奨します。" - -msgid "" -"If you use HAProxy for load-balancing client access to Galera Cluster as " -"described in the :doc:`controller-ha-haproxy`, you can use the " -"``clustercheck`` utility to improve health checks." -msgstr "" -":doc:`controller-ha-haproxy` に記載されているとおり、Galera Cluster へのクラ" -"イアントアクセスを負荷分散するために、HAProxy を使用している場合、 " -"``clustercheck`` ユーティリティーを使用して、より良くヘルスチェックできます。" - -msgid "" -"Image service (glance) can use the Object Storage service (swift) or Ceph " -"RBD as the storage back end." -msgstr "" -"Image サービス (glance) は、ストレージバックエンドとして Object Storage サー" -"ビス (swift) や Ceph RBD を使用できます。" - -msgid "" -"In Corosync configurations using redundant networking (with more than one " -"interface), you must select a Redundant Ring Protocol (RRP) mode other than " -"none. ``active`` is the recommended RRP mode." -msgstr "" -"(複数のインターフェースを用いた) 冗長ネットワークを使用する Corosync 設定にお" -"いて、none ではなく、Redundant Ring Protocol (RRP) を選択する必要があります。" -"``active`` が RRP の推奨モードです。" - -msgid "" -"In Galera Cluster, the Primary Component is the cluster of database servers " -"that replicate into each other. 
In the event that a cluster node loses " -"connectivity with the Primary Component, it defaults into a non-operational " -"state, to avoid creating or serving inconsistent data." -msgstr "" -"Galera Cluster では、Primary Component が、お互いにレプリケーションするデータ" -"ベースサーバーのクラスターです。クラスターノードが Primary Component との接続" -"性を失った場合、不整合なデータの作成や処理を避けるために、デフォルトで非稼働" -"状態になります。" - -msgid "" -"In Red Hat Enterprise Linux or CentOS environments, this is a recommended " -"path to perform configuration. For more information, see the `RHEL docs " -"`_." -msgstr "" -"Red Hat Enterprise Linux や CentOS 環境の場合、設定するための推奨パスがありま" -"す。詳細は `RHEL docs `_ を参照してください。" - -msgid "" -"In addition to Galera Cluster, you can also achieve high availability " -"through other database options, such as PostgreSQL, which has its own " -"replication system." -msgstr "" -"Galera Cluster 以外に、独自のレプリケーションシステムを持つ PostgreSQL など、" -"他のデータベースにより高可用性を実現することもできます。" - -msgid "" -"In general we can divide all the OpenStack components into three categories:" -msgstr "" -"一般的に、すべての OpenStack コンポーネントは 3 つのカテゴリーに分割できま" -"す。" - -msgid "In summary though:" -msgstr "概要:" - -msgid "" -"In the event that you already installed the standalone version of MySQL, " -"MariaDB or Percona XtraDB, this installation purges all privileges on your " -"OpenStack database server. You must reapply the privileges listed in the " -"installation guide." -msgstr "" -"すでに MySQL、MariaDB、Percona XtraDB のスタンドアロン版をインストールしてい" -"る場合、このインストールにより、お使いの OpenStack データベースサーバーにおい" -"て、すべての権限が削除されます。インストールガイドにまとめられている権限を再" -"適用する必要があります。" - -msgid "" -"In the event that you also want to configure multicast replication, run this " -"command as well:" -msgstr "" -"また、マルチキャストレプリケーションを設定したいイベントにおいて、このコマン" -"ドを同じように実行します。" - -msgid "" -"In the event that you also want to configure mutlicast replication, run this " -"command as well:" -msgstr "" -"また、マルチキャストレプリケーションを設定したいイベントにおいて、このコマン" -"ドを同じように実行します。" - -msgid "" -"In the event that you do not know the release code-name for your " -"distribution, you can use the following command to find it out:" -msgstr "" -"お使いのディストリビューションのリリースコード名がわからない場合、以下のコマ" -"ンドを使用して確認できます。" - -msgid "" -"In the event that you need to restart any cluster node, you can do so. When " -"the database server comes back it, it establishes connectivity with the " -"Primary Component and updates itself to any changes it may have missed while " -"down." -msgstr "" -"クラスターノードをどれか再起動する必要がある場合、実行できます。データベース" -"サーバーが戻ってきたとき、Primary Component との接続を確立して、停止中に失っ" -"た変更をすべて自身に適用します。" - -msgid "" -"In the event that you use multicast replication, you also need to open " -"``4567`` to UDP traffic:" -msgstr "" -"マルチキャストレプリケーションを使用する場合、UDP の ``4567`` 番ポートも開く" -"必要があります。" - -msgid "" -"In the text: Replace ``DISTRO`` with the name of the distribution you use, " -"such as ``sles`` or ``opensuse``. Replace ``RELEASE`` with the version " -"number of that distribution." -msgstr "" -"``DISTRO`` を使用する ``sles`` や ``opensuse`` などのディストリビューションの" -"名前で置き換えます。 ``RELEASE`` をディストリビューションのバージョン番号に置" -"き換えます。" - -msgid "" -"In the text: Replace ``VERSION`` with the version of MariaDB you want to " -"install, such as ``5.6`` or ``10.0``. Replace package with the package " -"architecture you want to use, such as ``opensuse13-amd64``." -msgstr "" -"テキストにおいて、インストールしたい MariaDB のバージョン、``5.6`` や " -"``10.0`` などで ``VERSION`` を置き換えます。使用したいパッケージアーキテク" -"チャー、``opensuse13-amd64`` などで package を置き換えます。" - -msgid "" -"In theory, you can run the Block Storage service as active/active. 
However, "
-"because of sufficient concerns, it is recommended to run the volume "
-"component as active/passive only."
-msgstr ""
-"理論的には、Block Storage サービスをアクティブ/アクティブとして実行できます。"
-"しかしながら、十分な課題のため、ボリュームコンポーネントをアクティブ/パッシブ"
-"のみとして実行することが推奨されます。"
-
-msgid "In this case that is a problem though, because:"
-msgstr "この場合、以下の理由で、それは問題になります。"
-
-msgid ""
-"In this configuration, each service runs in a dedicated cluster of 3 or more "
-"nodes."
-msgstr ""
-"この設定では、各サービスが 3 つ以上のノードの専用クラスターで動作します。"
-
-msgid ""
-"In this configuration, there is a single cluster of 3 or more nodes on which "
-"every component is running."
-msgstr ""
-"この設定では、すべてのコンポーネントが動作する、3 つ以上のノードを持つシング"
-"ルクラスターがあります。"
-
-msgid ""
-"Individual cluster nodes can stop and be restarted without issue. When a "
-"database loses its connection or restarts, Galera Cluster brings it back "
-"into sync once it reestablishes connection with the Primary Component. In "
-"the event that you need to restart the entire cluster, identify the most "
-"advanced cluster node and initialize the Primary Component on that node."
-msgstr ""
-"各クラスターノードは、問題なく停止したり再起動したりできます。データベースが"
-"接続を失ったり、再起動したりしたとき、Primary Component と再接続されると、"
-"Galera Cluster は同期状態に戻ります。クラスター全体を再起動する必要があると"
-"き、最も進んでいるクラスターノードを特定し、そのノードにおいて Primary "
-"Component を初期化します。"
-
-msgid ""
-"Initialize the Primary Component on one cluster node. For servers that use "
-"``init``, run the following command:"
-msgstr ""
-"1 つのクラスターノードにおいて Primary Component を初期化します。``init`` を"
-"使用するサーバーの場合、以下のコマンドを実行します。"
-
-msgid "Initializing the cluster"
-msgstr "クラスターの初期化"
-
-msgid "Install RabbitMQ"
-msgstr "RabbitMQ のインストール"
-
-msgid "Install memcached"
-msgstr "memcached のインストール"
-
-msgid "Install operating system on each node"
-msgstr "各ノードへのオペレーティングシステムのインストール"
-
-msgid "Install packages"
-msgstr "パッケージのインストール"
-
-msgid "Installation"
-msgstr "インストール"
-
-msgid "Installing Galera Cluster"
-msgstr "Galera Cluster のインストール"
-
-msgid "Installing high availability packages"
-msgstr "高可用性パッケージのインストール"
-
-msgid "Introduction to OpenStack high availability"
-msgstr "OpenStack 高可用性の概要"
-
-msgid ""
-"It is also possible to follow a segregated approach for one or more "
-"components that are expected to be a bottleneck and use a collapsed approach "
-"for the remainder."
-msgstr ""
-"ボトルネックになることが予想される 1 つ以上のコンポーネントには分離型のアプ"
-"ローチを採用し、残りのコンポーネントには集約型のアプローチを使用することもで"
-"きます。"
-
-msgid ""
-"It is important to note that HAProxy has no idea that any of this is "
-"happening. As far as its process is concerned, it called ``write()`` with "
-"the data and the kernel returned success. The resolution is already "
-"understood and just needs to make its way through a review."
-msgstr ""
-"HAProxy は、これらのことが起こっていることをまったく把握していない点に注意し"
-"てください。HAProxy のプロセスから見れば、データを指定して ``write()`` を呼"
-"び出し、カーネルが成功を返しただけです。この問題の解決方法はすでにわかってお"
-"り、あとはレビューを通過するだけです。"
-
-msgid ""
-"It is possible to add controllers to such an environment to convert it into "
-"a truly highly available environment."
-msgstr ""
-"コントローラーをそのような環境に追加して、真の高可用性環境に変換できます。"
-
-msgid "Jon Bernard writes:"
-msgstr "Jon Bernard は次のように書きました。"
-
-msgid ""
-"Jon Eck found the `core issue `_ and went into some detail regarding the "
-"`history and solution `_ on his blog."
-msgstr ""
-"Jon Eck さんは、 `コアな問題 `_ を発見して、彼のブログにおいて `経緯と解決策 "
-"`_ "
-"に関する詳細を述べました。"
-
-msgid "Keepalived and network partitions"
-msgstr "Keepalived とネットワーク分割"
-
-msgid "Keepalived, for the HAProxy instances."
-msgstr "Keepalived、HAProxy インスタンス向け。" - -msgid "Key" -msgstr "キー" - -msgid "" -"Link to `Networking Guide `_ " -"for configuration details." -msgstr "" -"設定の詳細は `Networking Guide `_ を参照してください。" - -msgid "Load distribution" -msgstr "負荷分散" - -msgid "" -"Log in to the database client and grant the ``clustercheck`` user " -"``PROCESS`` privileges." -msgstr "" -"データベースクライアントにログインして、``clustercheck`` ユーザーに " -"``PROCESS`` 権限を与えます。" - -msgid "Make sure pcs is running and configured to start at boot time:" -msgstr "" -"pcs が実行中で、ブート時に起動するよう設定されていることを確認してください。" - -msgid "" -"Make the changes persistent. For servers that use ``init``, use the :command:" -"`save` command:" -msgstr "" -"変更を永続化します。 ``init`` を使用するサーバーの場合、 :command:`save` コマ" -"ンドを使用します。" - -msgid "" -"Making the Block Storage (cinder) API service highly available in active/" -"passive mode involves:" -msgstr "" -"Block Storage (cinder) API サービスのアクティブ/パッシブモードでの高可用性" -"は、以下が関係します。" - -msgid "" -"Making the OpenStack Identity service highly available in active / passive " -"mode involves:" -msgstr "" -"OpenStack Identity Service をアクティブ / パッシブモードで高可用性にすること" -"は、次のことが関連します。" - -msgid "" -"Making the RabbitMQ service highly available involves the following steps:" -msgstr "RabbitMQ サービスを高可用性にすることは、以下の手順が関連します。" - -msgid "" -"Making the Shared File Systems (manila) API service highly available in " -"active/passive mode involves:" -msgstr "" -"Shared File Systems (manila) API サービスのアクティブ/パッシブモードでの高可" -"用性は、以下が関係します。" - -msgid "" -"Making this Block Storage API service highly available in active/passive " -"mode involves:" -msgstr "" -"Block Storage API サービスのアクティブ/パッシブモードでの高可用性は、以下が関" -"係します。" - -msgid "Management" -msgstr "マネジメント" - -msgid "" -"Managing the Block Storage API daemon with the Pacemaker cluster manager" -msgstr "" -"Pacemaker クラスターマネージャーを用いた Block Storge API デーモンの管理" - -msgid "Manual recovery after a full cluster restart" -msgstr "完全なクラスター再起動後の手動リカバリー" - -msgid "" -"Many services can act in an active/active capacity, however, they usually " -"require an external mechanism for distributing requests to one of the " -"available instances. The proxy server can serve this role." -msgstr "" -"ほとんどのサービスがアクティブ/アクティブ機能で動作できます。しかしながら、通" -"常は分散されたリクエストが利用できるインスタンスのどれかになる外部機能が必要" -"になります。プロキシーサーバーはこの役割になれます。" - -msgid "MariaDB Galera Cluster" -msgstr "MariaDB Galera Cluster" - -msgid "MariaDB Galera Cluster:" -msgstr "MariaDB Galera Cluster:" - -msgid "Maximum number of network nodes to use for the HA router." -msgstr "HA ルーターのために使用するネットワークノードの最大数" - -msgid "" -"Maximum retries with trying to connect to RabbitMQ (infinite by default):" -msgstr "RabbitMQ に接続を試行する最大回数 (デフォルトで無制限):" - -msgid "Memcached" -msgstr "Memcached" - -msgid "" -"Memcached is a general-purpose distributed memory caching system. It is used " -"to speed up dynamic database-driven websites by caching data and objects in " -"RAM to reduce the number of times an external data source must be read." -msgstr "" -"Memcached は汎用の分散メモリーキャッシュシステムです。データやオブジェクトを" -"メモリーにキャッシュすることにより、外部データソースの読み込み回数を減らし、" -"データベースを利用した動的 Web サイトを高速化するために使用されます。" - -msgid "" -"Memcached is a memory cache demon that can be used by most OpenStack " -"services to store ephemeral data, such as tokens." -msgstr "" -"Memcached は、ほとんどの OpenStack サービスがトークンなどの一時的なデータを保" -"存するために使用できる、メモリーキャッシュのデーモンです。" - -msgid "" -"Memcached uses a timeout value, which should always be set to a value that " -"is higher than the heartbeat value set for Telemetry." 
-msgstr "" -"Memcached は、タイムアウト値を使用します。これは、Telemetry 向けに設定された" -"ハートビート値よりも大きい値を常に設定されるべきです。" - -msgid "Memory" -msgstr "メモリー" - -msgid "" -"Memory caching is managed by `oslo.cache `_ so the way " -"to use multiple memcached servers is the same for all projects." -msgstr "" -"メモリーキャッシュは `oslo.cache `_ により管理されています。そ" -"のため、複数の memcached サーバーを使用する方法が、すべてのプロジェクトで同じ" -"になります。" - -msgid "" -"Minimum number of network nodes to use for the HA router. A new router can " -"be created only if this number of network nodes are available." -msgstr "" -"HA ルーターのために使用するネットワークノードの最小数。この数だけのネットワー" -"クノードを利用できる場合のみ、新規ルーターを作成できます。" - -msgid "" -"Mirrored queues in RabbitMQ improve the availability of service since it is " -"resilient to failures." -msgstr "" -"RabbitMQ のキューミラーは、障害耐性があるので、サービスの可用性を改善します。" - -msgid "Mixed" -msgstr "Mixed" - -msgid "MongoDB" -msgstr "MongoDB" - -msgid "More information is available in the RabbitMQ documentation:" -msgstr "詳細は RabbitMQ のドキュメントにあります。" - -msgid "" -"Most OpenStack services can use memcached to store ephemeral data such as " -"tokens. Although memcached does not support typical forms of redundancy such " -"as clustering, OpenStack services can use almost any number of instances by " -"configuring multiple hostnames or IP addresses. The memcached client " -"implements hashing to balance objects among the instances. Failure of an " -"instance only impacts a percentage of the objects and the client " -"automatically removes it from the list of instances." -msgstr "" -"ほとんどの OpenStack サービスは、トークンなどの一時データを保存するために " -"memcached を使用できます。memcached はクラスターなどの一般的な形式の冗長化を" -"サポートしませんが、OpenStack サービスは複数のホスト名や IP アドレスを設定す" -"ることにより、ほぼ任意の数のインスタンスを使用できます。Memcached クライアン" -"トは、インスタンス間でオブジェクトを分散するハッシュ機能を持ちます。インスタ" -"ンスの障害は、オブジェクトの使用率のみに影響します。クライアントは、インスタ" -"ンスの一覧から自動的に削除されます。" - -msgid "" -"Most distributions ship an example configuration file (:file:`corosync.conf." -"example`) as part of the documentation bundled with the Corosync package. An " -"example Corosync configuration file is shown below:" -msgstr "" -"ほとんどのディストリビューションは、Corosync パッケージに同梱されているドキュ" -"メントの一部として、サンプル設定ファイル (:file:`corosync.conf.example`) を同" -"梱しています。" - -msgid "" -"Most high availability systems fail in the event of multiple independent " -"(non-consequential) failures. In this case, most implementations favor " -"protecting data over maintaining availability." -msgstr "" -"多くの高可用性システムは、複数の独立した (不連続な) 障害が発生すると停止しま" -"す。この場合、多くのシステムは可用性の維持よりデータを保護することを優先しま" -"す。" - -msgid "" -"Most high availability systems guarantee protection against system downtime " -"and data loss only in the event of a single failure. However, they are also " -"expected to protect against cascading failures, where a single failure " -"deteriorates into a series of consequential failures. Many service providers " -"guarantee :term:`Service Level Agreement (SLA)` including uptime percentage " -"of computing service, which is calculated based on the available time and " -"system downtime excluding planned outage time." -msgstr "" -"多くの高可用性システムは、単一障害事象のみにおいて、システム停止時間やデータ" -"損失に対する保護を保証します。しかしながら、単一障害が一連の障害を悪化させて" -"いく、段階的な障害に対しても保護されることが期待されます。多くのサービスプロ" -"バイダーは、コンピューティングサービスの稼働率などの :term:`Service Level " -"Agreement (SLA)` を保証します。それは、計画停止を除くシステム停止時間と稼働時" -"間に基づいて計算されます。" - -msgid "" -"Most of this guide concerns the control plane of high availability: ensuring " -"that services continue to run even if a component fails. Ensuring that data " -"is not lost is the data plane component of high availability; this is " -"discussed here." 
-msgstr "" -"このガイドのほとんどは、コントロールプレーンの高可用性を取り扱います。コン" -"ポーネントが故障した場合でも、そのサービスが動作しつづけることを保証します。" -"データ失われないことを保証することは、データプレーンのコンポーネントの高可用" -"性です。それは、ここで議論します。" - -msgid "" -"Multicast groups (``mcastaddr``) must not be reused across cluster " -"boundaries. In other words, no two distinct clusters should ever use the " -"same multicast group. Be sure to select multicast addresses compliant with " -"`RFC 2365, \"Administratively Scoped IP Multicast\" `_." -msgstr "" -"マルチキャストグループ (``mcastaddr``) は、クラスターの境界を越えて再利用でき" -"ません。別の言い方をすると、2 つの独立したクラスターは、同じマルチキャストグ" -"ループを使用すべきではありません。選択したマルチキャストアドレス をきちんと" -"`RFC 2365, \"Administratively Scoped IP Multicast\" `_ に準拠させてください。" - -msgid "" -"MySQL databases, including MariaDB and Percona XtraDB, manage their " -"configurations using a ``my.cnf`` file, which is typically located in the ``/" -"etc`` directory. Configuration options available in these databases are also " -"available in Galera Cluster, with some restrictions and several additions." -msgstr "" -"MariaDB や Percona XtraDB を含む、MySQL は ``my.cnf`` ファイルを使用して設定" -"を管理します。一般的に ``/etc`` ディレクトリーにあります。これらのデータベー" -"スにおいて利用できる設定オプションは、Galera Cluster においても利用できます。" -"いくつかの制約や追加があります。" - -msgid "NIC" -msgstr "NIC" - -msgid "Network components" -msgstr "ネットワークコンポーネント" - -msgid "Network components, such as switches and routers" -msgstr "スイッチやルーターなどのネットワークの構成要素" - -msgid "" -"Neutron L2 agent. Note that the L2 agent cannot be distributed and highly " -"available. Instead, it must be installed on each data forwarding node to " -"control the virtual network drivers such as Open vSwitch or Linux Bridge. " -"One L2 agent runs per node and controls its virtual interfaces." -msgstr "" -"Neutron L2 エージェント。L2 エージェントは分散させることはできず、高可用構成" -"にはできません。その代わり、 L2 エージェントを各データ転送ノードにインストー" -"ルして、 Open vSwitch や Linux ブリッジなどの仮想ネットワークドライバーを制御" -"します。ノードあたり 1 つの L2 エージェントが動作し、そのノードの仮想インター" -"フェースの制御を行います。" - -msgid "" -"Neutron agents shuld be described for active/active; deprecate single " -"agent's instances case." -msgstr "" -"Neutron エージェントは、アクティブ/アクティブ向けにすべきです。シングルエー" -"ジェントのインスタンスは推奨されません。" - -msgid "Neutron-lbaas-agent as a single point of failure" -msgstr "単一障害点としての neutron-lbaas-agent" - -msgid "No firewalls between the hosts;" -msgstr "ホスト間にファイアウォールがないこと。" - -msgid "" -"No high availability, when the service can only work in active/passive mode." -msgstr "" -"このサービスがアクティブ/パッシブモードのみで動作する場合、高可用性はありませ" -"ん。" - -msgid "" -"No native feature is available to make this service highly available. At " -"this time, the Active/Passive solution exists to run the neutron metadata " -"agent in failover mode with Pacemaker." -msgstr "" -"このサービスを高可用化するための組み込み機能はありません。現状、アクティブ/" -"パッシブのソリューションが存在し、Pacemaker を用いてフェイルオーバーモードで " -"neutron メタデータエージェントを実行します。" - -msgid "Node type" -msgstr "ノード種別" - -msgid "" -"Note that the particular key value in this command varies depending on which " -"database software repository you want to use." -msgstr "" -"このコマンドの具体的なキーは、使用したいデータベースのソフトウェアリポジト" -"リーにより異なります。" - -msgid "Note the following about the recommended interface configuration:" -msgstr "インターフェースの推奨設定に関する注意事項がいくつかあります。" - -msgid "Note the following:" -msgstr "以下に注意してください。" - -msgid "Notes from planning outline:" -msgstr "計画の概要からのメモ:" - -msgid "" -"Occurs when a user-facing service is unavailable beyond a specified maximum " -"amount of time." -msgstr "指定された最大時間を超えて、ユーザーサービスが利用不可能になること。" - -msgid "" -"Of these options, the second one is highly recommended. 
Although Galera " -"supports active/active configurations, we recommend active/passive (enforced " -"by the load balancer) in order to avoid lock contention." -msgstr "" -"これらの選択肢のうち、2 番目が強く推奨されます。Galera はアクティブ/アクティ" -"ブ設定をサポートしますが、ロック競合を避けるために、(ロードバランサーにより強" -"制される) アクティブ/パッシブを推奨します。" - -msgid "On CentOS, RHEL, openSUSE, and SLES:" -msgstr "CentOS、RHEL、openSUSE、SLES の場合:" - -msgid "" -"On RHEL-based systems, you should create resources for cinder's systemd " -"agents and create constraints to enforce startup/shutdown ordering:" -msgstr "" -"RHEL 系のシステムでは、cinder の systemd エージェント向けリソースを作成して、" -"起動と停止の順番を強制する制約を作成すべきです。" - -msgid "On Ubuntu, it is configured by default." -msgstr "Ubuntu の場合、デフォルトで設定されています。" - -msgid "" -"On ``3306``, Galera Cluster uses TCP for database client connections and " -"State Snapshot Transfers methods that require the client, (that is, " -"``mysqldump``)." -msgstr "" -"``3306`` では、Galera Cluster がデータベースクライアント接続のために TCP を使" -"用します。また、クライアント 、つまり ``mysqldump`` を必要とする State " -"Snapshot Transfers メソッドを使用します。" - -msgid "" -"On ``4444`` Galera Cluster uses TCP for all other State Snapshot Transfer " -"methods." -msgstr "" -"``4444`` では、Galera Cluster が他のすべての State Snapshot Transfer メソッド" -"のために TCP を使用します。" - -msgid "" -"On ``4567`` Galera Cluster uses TCP for replication traffic. Multicast " -"replication uses both TCP and UDP on this port." -msgstr "" -"``4567`` では、Galera Cluster が複製通信のために TCP を使用します。マルチキャ" -"ストレプリケーションは、このポートで TCP と UDP を使用します。" - -msgid "On ``4568`` Galera Cluster uses TCP for Incremental State Transfers." -msgstr "" -"``4568`` では、Galera Cluster が Incremental State Transfers のために TCP を" -"使用します。" - -msgid "On a RHEL-based system, it should look something like:" -msgstr "RHEL 系システムの場合、次のようになるでしょう。" - -msgid "" -"On any host that is meant to be part of a Pacemaker cluster, you must first " -"establish cluster communications through the Corosync messaging layer. This " -"involves installing the following packages (and their dependencies, which " -"your package manager usually installs automatically):" -msgstr "" -"Pacemaker クラスターに参加させる各ホストで、まず Corosync メッセージレイヤー" -"でクラスター通信を行う必要があります。これには、以下のパッケージをインストー" -"ルする必要があります (依存パッケージも含みます。依存パッケージは通常パッケー" -"ジマネージャーにより自動的にインストールされます)。" - -msgid "" -"On each target node, verify the correct owner, group, and permissions of the " -"file :file:`erlang.cookie`." -msgstr "" -"各ターゲットノードにおいて、 :file:`erlang.cookie` の所有者、所有グループ、" -"パーミッションが正しいことを確認します。" - -msgid "" -"On the infrastructure layer, the SLA is the time for which RabbitMQ cluster " -"reassembles. Several cases are possible. The Mnesia keeper node is the " -"master of the corresponding Pacemaker resource for RabbitMQ; when it fails, " -"the result is a full AMQP cluster downtime interval. Normally, its SLA is no " -"more than several minutes. Failure of another node that is a slave of the " -"corresponding Pacemaker resource for RabbitMQ results in no AMQP cluster " -"downtime at all." -msgstr "" -"インフラ層では、SLA は RabbitMQ クラスターが再構成されるまでの時間です。いく" -"つかの場合では実現できます。Mnesia keeper ノードは、対応する RabbitMQ 用 " -"Pacemaker リソースのマスターです。停止したとき、結果として AMQP クラスターの" -"停止時間になります。通常、その SLA は、数分間より長くなることはありません。対" -"応する RabbitMQ 用 Pacemaker リソースのスレーブになっている、他のノードの停止" -"により AMQP クラスターが停止することはありません。" - -msgid "" -"Once completed, commit your configuration changes by entering :command:" -"`commit` from the :command:`crm configure` menu. Pacemaker then starts the " -"Block Storage API service and its dependent resources on one of your nodes." 
-msgstr "" -"これらの手順の完了後、:command:`crm configure` メニューから :command:" -"`commit` と入力し、設定の変更をコミットします。Pacemaker は Block Storage " -"API サービスおよび依存するリソースを同じノードに起動します。" - -msgid "" -"Once completed, commit your configuration changes by entering :command:" -"`commit` from the :command:`crm configure` menu. Pacemaker then starts the " -"Shared File Systems API service and its dependent resources on one of your " -"nodes." -msgstr "" -"これらの手順の完了後、:command:`crm configure` メニューから :command:" -"`commit` と入力し、設定の変更をコミットします。Pacemaker は Shared File " -"Systems API サービスおよび依存するリソースを同じノードに起動します。" - -msgid "" -"Once configured (see example file below), add HAProxy to the cluster and " -"ensure the VIPs can only run on machines where HAProxy is active:" -msgstr "" -"設定すると (以下のサンプルファイル参照)、HAProxy をクラスターに追加して、仮" -"想 IP が HAProxy の動作しているマシンにおいてのみ動作できることを確認してくだ" -"さい。" - -msgid "" -"Once created, the :file:`corosync.conf` file (and the :file:`authkey` file " -"if the secauth option is enabled) must be synchronized across all cluster " -"nodes." -msgstr "" -"作成後、 :file:`corosync.conf` ファイル (および、secauth オプションが有効化さ" -"れている場合、 :file:`authkey`ファイル) が、すべてのクラスターノードにわたり" -"同期されている必要があります。" - -msgid "" -"Once the database server starts, check the cluster status using the " -"``wsrep_cluster_size`` status variable. From the database client, run the " -"following command:" -msgstr "" -"データベースサーバーが起動すると、``wsrep_cluster_size`` 状態変数を使用して、" -"クラスター状態を確認します。データベースクライアントから、以下のコマンドを実" -"行します。" - -msgid "" -"One physical server can support multiple nodes, each of which supports " -"almost any number of network interfaces." -msgstr "" -"1 台の物理サーバーで複数のノードを構築できます。各ノードは複数のネットワーク" -"インターフェースを持てます。" - -msgid "" -"One uses a cluster manager such as Pacemaker or Veritas to co-ordinate the " -"actions of the various services across a set of machines. Since we are " -"focused on FOSS, we will refer to this as the Pacemaker architecture." -msgstr "" -"あるものは、Pacemaker や Veritas などのクラスターマネージャーを使用して、複数" -"のマシンにまたがるさまざまなサービスの動作を調整します。私たちは FOSS に注力" -"しているため、Pacemaker のアーキテクチャーを参照します。" - -msgid "" -"OpenStack APIs, these are HTTP(s) stateless services written in python, easy " -"to duplicate and mostly easy to load balance." -msgstr "" -"OpenStack API。これらは HTTP のステートレスサービスです。Python で書かれてい" -"て、簡単に冗長化でき、かなり簡単に負荷分散できます。" - -msgid "OpenStack Block Storage" -msgstr "OpenStack Block Storage" - -msgid "OpenStack Compute" -msgstr "OpenStack Compute" - -msgid "OpenStack High Availability Guide" -msgstr "OpenStack 高可用性ガイド" - -msgid "" -"OpenStack Identity (keystone) is the Identity service in OpenStack that is " -"used by many services. You should be familiar with `OpenStack identity " -"concepts `_ before proceeding." -msgstr "" -"OpenStack Identity (keystone) は、多くのサービスにより使用される OpenStack " -"の Identity サービスです。続行する前に `OpenStack Identity の概念 `_ に慣れておくべきです。" - -msgid "OpenStack Networking" -msgstr "OpenStack Networking" - -msgid "" -"OpenStack currently meets such availability requirements for its own " -"infrastructure services, meaning that an uptime of 99.99% is feasible for " -"the OpenStack infrastructure proper. However, OpenStack does not guarantee " -"99.99% availability for individual guest instances." -msgstr "" -"OpenStack 自体のインフラストラクチャーは、現在その可用性要件を満たせます。つ" -"まり、適切な OpenStack インフラストラクチャーの 99.99% の稼働率が実現可能で" -"す。しかしながら、OpenStack は個々のゲストインスタンスの可用性 99.99% を保証" -"できません。" - -msgid "" -"OpenStack is a set of multiple services exposed to the end users as HTTP(s) " -"APIs. 
Additionally, for own internal usage OpenStack requires SQL database " -"server and AMQP broker. The physical servers, where all the components are " -"running are often called controllers. This modular OpenStack architecture " -"allows to duplicate all the components and run them on different " -"controllers. By making all the components redundant it is possible to make " -"OpenStack highly-available." -msgstr "" -"OpenStack は、HTTP(s) API としてエンドユーザーに公開される、複数のサービス群" -"です。さらに、その内部利用のために、OpenStack は SQL データベースサーバーと " -"AMQP ブローカーを必要とします。すべてのコンポーネントが動作している、物理サー" -"バーはよくコントローラーと呼ばれます。このモジュール型の OpenStack アーキテク" -"チャーにより、すべてのコンポーネントを複製して、それらを別々のコントローラー" -"で実行できます。すべてのコンポーネントを冗長にすることにより、OpenStack の高" -"可用性を実現できます。" - -msgid "OpenStack network nodes" -msgstr "OpenStack ネットワークノード" - -msgid "OpenStack network nodes contain:" -msgstr "OpenStack ネットワークノードでは、以下のものが動作します。" - -msgid "" -"OpenStack services are configured with the list of these IP addresses so " -"they can select one of the addresses from those available." -msgstr "" -"OpenStack サービスは、利用できるものから 1 つを選択できるよう、これらの IP ア" -"ドレスの一覧を用いて設定されます。" - -msgid "" -"OpenStack supports a single-controller high availability mode that is " -"managed by the services that manage highly available environments but is not " -"actually highly available because no redundant controllers are configured to " -"use for failover. This environment can be used for study and demonstration " -"but is not appropriate for a production environment." -msgstr "" -"OpenStack は、シングルコントローラーの高可用性モードをサポートします。これ" -"は、高可用性環境を管理するソフトウェアにより、サービスが管理されますが、コン" -"トローラーがフェイルオーバーのために冗長化設定されていないため、実際には高可" -"用性ではありません。この環境は、学習やデモのために使用できますが、本番環境と" -"しては適していません。" - -msgid "Overview of high availability storage" -msgstr "高可用性ストレージの概要" - -msgid "Overview of highly-available compute nodes" -msgstr "高可用性コンピュートノードの概要" - -msgid "Overview of highly-available controllers" -msgstr "高可用性コントローラーの概要" - -msgid "Pacemaker cluster stack" -msgstr "Pacemaker クラスタースタック" - -msgid "" -"Pacemaker does not inherently (need or want to) understand the applications " -"it manages. Instead, it relies on resource agents (RAs), scripts that " -"encapsulate the knowledge of how to start, stop, and check the health of " -"each application managed by the cluster." -msgstr "" -"Pacemaker は、管理するアプリケーションを本質的に理解してません (必要ありませ" -"ん)。代わりに、リソースエージェント (RA) に依存します。これは、クラスターによ" -"り管理される各アプリケーションの起動、停止、ヘルスチェック方法に関する知識を" -"隠蔽するスクリプトです。" - -msgid "" -"Pacemaker relies on the `Corosync `_ " -"messaging layer for reliable cluster communications. Corosync implements the " -"Totem single-ring ordering and membership protocol. It also provides UDP and " -"InfiniBand based messaging, quorum, and cluster membership to Pacemaker." -msgstr "" -"Pacemaker は、高信頼なクラスター通信のために `Corosync `_ メッセージング層に依存します。Corosync は、Totem シン" -"グルリングによる順番制御とメンバーシッププロトコルを実装します。また、UDP や " -"InfiniBand ベースのメッセージング、クォーラム、クラスターメンバーシップを " -"Pacemaker に提供します。" - -msgid "" -"Pacemaker ships with a large set of OCF agents (such as those managing MySQL " -"databases, virtual IP addresses, and RabbitMQ), but can also use any agents " -"already installed on your system and can be extended with your own (see the " -"`developer guide `_)." -msgstr "" -"Pacemaker は、(MySQL データベース、仮想 IP アドレス、RabbitMQ などの) OCF " -"エージェントをたくさん同梱していますが、お使いのシステムにインストールした任" -"意のエージェントも使用できます。また、自身で拡張することもできます " -"(`developer guide `_ 参照)。" - -msgid "" -"Pacemaker uses an event-driven approach to cluster state processing. 
The "
-"``cluster-recheck-interval`` parameter (which defaults to 15 minutes) "
-"defines the interval at which certain Pacemaker actions occur. It is usually "
-"prudent to reduce this to a shorter interval, such as 5 or 3 minutes."
-msgstr ""
-"Pacemaker は、クラスターの状態を処理するために、イベントドリブンのアプローチ"
-"を使用します。 ``cluster-recheck-interval`` パラメーター (デフォルトは 15 "
-"分) が、ある Pacemaker のアクションが発生する間隔を定義します。通常、これを "
-"5 分や 3 分など、より短い間隔に減らしておくのが賢明です。"
-
-msgid ""
-"Packages in the Galera Cluster Debian repository are now available for "
-"installation on your system."
-msgstr ""
-"これで Galera Cluster Debian リポジトリーにあるパッケージがお使いのシステムで"
-"利用できます。"
-
-msgid ""
-"Packages in the Galera Cluster Red Hat repository are now available for "
-"installation on your system."
-msgstr ""
-"これで Galera Cluster Red Hat リポジトリーにあるパッケージがお使いのシステム"
-"で利用できます。"
-
-msgid ""
-"Packages in the Galera Cluster SUSE repository are now available for "
-"installation."
-msgstr ""
-"これで Galera Cluster SUSE リポジトリーにあるパッケージがお使いのシステムで利"
-"用できます。"
-
-msgid "Parameter"
-msgstr "パラメーター"
-
-msgid "Percona XtraDB Cluster"
-msgstr "Percona XtraDB Cluster"
-
-msgid "Percona XtraDB Cluster:"
-msgstr "Percona XtraDB Cluster:"
-
-msgid ""
-"Persistent block storage can survive instance termination and can also be "
-"moved across instances like any external storage device. Cinder also has "
-"volume snapshots capability for backing up the volumes."
-msgstr ""
-"永続ブロックストレージは、インスタンス終了後に残存して、任意の外部ストレージ"
-"デバイスのようにインスタンスを越えて移動できます。Cinder は、ボリュームをバッ"
-"クアップするために、ボリュームスナップショット機能も持ちます。"
-
-msgid ""
-"Persistent storage exists outside all instances. Two types of persistent "
-"storage are provided:"
-msgstr ""
-"永続ストレージは、すべてのインスタンスの外部にあります。2 種類の永続ストレー"
-"ジが提供されます。"
-
-msgid "Possible options are:"
-msgstr "利用できるオプションは次のとおりです。"
-
-msgid ""
-"Preventing single points of failure can depend on whether or not a service "
-"is stateless."
-msgstr ""
-"単一障害点をなくせるかは、サービスがステートレスであるかに依存する場合があり"
-"ます。"
-
-msgid "Processor"
-msgstr "プロセッサー"
-
-msgid ""
-"Production servers should run (at least) three RabbitMQ servers; for testing "
-"and demonstration purposes, it is possible to run only two servers. In this "
-"section, we configure two nodes, called ``rabbit1`` and ``rabbit2``. To "
-"build a broker, we need to ensure that all nodes have the same Erlang cookie "
-"file."
-msgstr ""
-"本番サーバーは、(少なくとも) 3 つの RabbitMQ サーバーを実行すべきです。テスト"
-"やデモの目的の場合、サーバーを 2 つだけ実行することもできます。このセクション"
-"では、``rabbit1`` と ``rabbit2`` という 2 つのノードを設定します。ブローカー"
-"を構築するために、すべてのノードがきちんと同じ Erlang クッキーファイルを持つ"
-"必要があります。"
-
-msgid "Proxy server"
-msgstr "プロキシーサーバー"
-
-msgid "Query the quorum status"
-msgstr "クォーラム状態を問い合わせます"
-
-msgid "RAID drives"
-msgstr "RAID ドライブ"
-
-msgid "RHEL, Fedora, CentOS"
-msgstr "RHEL, Fedora, CentOS"
-
-msgid "RabbitMQ"
-msgstr "RabbitMQ"
-
-msgid "RabbitMQ HA cluster host:port pairs:"
-msgstr "RabbitMQ HA クラスターの「ホスト:ポート」のペア:"
-
-msgid ""
-"RabbitMQ nodes fail over both on the application and the infrastructure "
-"layers."
-msgstr ""
-"RabbitMQ ノードは、アプリケーションとインフラ層の両方においてフェイルオーバー"
-"します。"
-
-msgid ""
-"Rather than configuring neutron here, we should simply mention physical "
-"network HA methods such as bonding and additional node/network requirements "
-"for L3HA and DVR for planning purposes."
-msgstr ""
-"ここで neutron を設定する代わりに、ボンディングなどの物理ネットワークの HA 手"
-"法と、計画のための L3HA や DVR の追加ノード/ネットワーク要件について簡単に言"
-"及すべきです。"
-
-msgid "Receive notifications of quorum state changes"
-msgstr "クォーラムの状態変更の通知を受け付けます"
-
-msgid "Recommended for testing."
-msgstr "テスト向けの推奨。" - -msgid "Recommended solution by the Tooz project." -msgstr "Tooz プロジェクトによる推奨ソリューション。" - -msgid "Red Hat" -msgstr "Red Hat" - -msgid "Redundancy and failover" -msgstr "冗長性とフェールオーバー" - -msgid "Refresh ``zypper``:" -msgstr "``zypper`` を最新化します。" - -msgid "" -"Regardless of which flavor you choose, it is recommended that the clusters " -"contain at least three nodes so that we can take advantage of `quorum " -"`_." -msgstr "" -"選択したフレーバーに関わらず、`quorum `_ の利点を得るために、少なく" -"とも 3 ノードを持つクラスターを推奨します。" - -msgid "Remote backup facilities" -msgstr "リモートバックアップ機能" - -msgid "" -"Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage " -"database." -msgstr "" -"``CINDER_DBPASS`` を Block Storage データベース用に選択したパスワードで置き換" -"えます。" - -msgid "" -"Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage " -"database. Replace ``CINDER_PASS`` with the password you chose for the " -"``cinder`` user in the Identity service." -msgstr "" -"``CINDER_DBPASS`` を Block Storage サービス用に選択したパスワードで置き換えま" -"す。``CINDER_PASS`` を Identity サービスで ``cinder`` ユーザー用に選択したパ" -"スワードで置き換えます。" - -msgid "" -"Replace ``DISTRO`` with the name of the distribution you use, such as " -"``centos`` or ``fedora``. Replace ``RELEASE`` with the release number, such " -"as ``7`` for CentOS 7. Replace ``ARCH`` with your system architecture, such " -"as ``x86_64``" -msgstr "" -"``DISTRO`` を ``centos`` や ``fedora`` などの使用するディストリビューションの" -"名前で置き換えます。 ``RELEASE`` を CentOS 7 向けの ``7`` などのリリース番号" -"で置き換えます。 ``ARCH`` を ``x86_64`` などのシステムアーキテクチャーで置き" -"換えます。" - -msgid "" -"Replace ``VERSION`` with the version of MariaDB you want to install, such as " -"``5.6`` or ``10.0``. Replace ``PACKAGE`` with the package type and " -"architecture, such as ``rhel6-amd64`` for Red Hat 6 on 64-bit architecture." -msgstr "" -"インストールしたい MariaDB のバージョン、``5.6`` や ``10.0`` などで " -"``VERSION`` を置き換えます。パッケージ種別とアーキテクチャー、Red Hat 6 64 -" -"bit アーキテクチャー向けの ``rhel6-amd64`` などで ``PACKAGE`` を置き換えま" -"す。" - -msgid "" -"Replace the IP addresses given here with comma-separated list of each " -"OpenStack database in your cluster." -msgstr "" -"ここで指定された IP アドレスを、お使いのクラスターにある OpenStack の各データ" -"ベースのコンマ区切りリストに置き換えます。" - -msgid "" -"Restart AppArmor. For servers that use ``init``, run the following command:" -msgstr "" -"AppArmor を再起動します。``init`` を使用するサーバーの場合、以下のコマンドを" -"実行します。" - -msgid "Restarting the cluster" -msgstr "クラスターの再起動" - -msgid "Run neutron DHCP agent" -msgstr "Neutron DHCP エージェントの実行" - -msgid "Run neutron L3 agent" -msgstr "Neutron L3 エージェントの実行" - -msgid "Run neutron LBaaS agent" -msgstr "neutron LBaaS エージェントの実行" - -msgid "Run neutron metadata agent" -msgstr "Neutron メタデータエージェントの実行" - -msgid "Run the following commands on each node except the first one:" -msgstr "1 番目のノード以外の各ノードで以下のコマンドを実行します。" - -msgid "SELinux" -msgstr "SELinux" - -msgid "SELinux and AppArmor set to permit access to ``mysqld``;" -msgstr "SELinux や AppArmor が ``mysqld`` にアクセスを許可していること。" - -msgid "SLES 12" -msgstr "SLES 12" - -msgid "" -"SQL relational database server provides stateful type consumed by other " -"components. Supported databases are MySQL, MariaDB, and PostgreSQL. Making " -"SQL database redundant is complex." -msgstr "" -"SQL リレーショナルデータベースサーバーは、他のコンポーネントにより利用される" -"ステートフルな状態を提供します。サポートされるデータベースは、MySQL、" -"MariaDB、PostgreSQL です。SQL データベースを冗長化することは複雑です。" - -msgid "SUSE" -msgstr "SUSE" - -msgid "Search in this guide" -msgstr "ガイド内検索" - -msgid "" -"Security-Enhanced Linux is a kernel module for improving security on Linux " -"operating systems. 
It is commonly enabled and configured by default on Red " -"Hat-based distributions. In the context of Galera Cluster, systems with " -"SELinux may block the database service, keep it from starting or prevent it " -"from establishing network connections with the cluster." -msgstr "" -"Security-Enhanced Linux は、Linux オペレーティングシステムにおいてセキュリ" -"ティーを向上させるためのカーネルモジュールです。Red Hat 系のディストリビュー" -"ションでは、一般的にデフォルトで有効化され、設定されています。Galera Cluster " -"の観点では、SELinux を有効化したシステムは、データベースサービスをブロックす" -"るかもしれません。また、クラスターを起動しても、ネットワーク接続を確立できな" -"いかもしれません。" - -msgid "" -"See [TODO link] for more information about configuring networking for high " -"availability." -msgstr "" -"See [TODO link] for more information about configuring networking for high " -"availability." - -msgid "Segregated" -msgstr "Segregated" - -msgid "Service monitoring and recovery required" -msgstr "サービスモニタリングおよび必要なリカバリー" - -msgid "" -"Services like RabbitMQ and Galera have complicated boot-up sequences that " -"require co-ordination, and often serialization, of startup operations across " -"all machines in the cluster. This is especially true after site-wide failure " -"or shutdown where we must first determine the last machine to be active." -msgstr "" -"RabbitMQ や Galera などのサービスは、複雑な起動順番を持ちます。クラスター内の" -"全マシンに渡り、起動処理の協調動作を必要とし、しばしば順番に実行する必要があ" -"ります。とくに、サイト全体の障害後、最後にアクティブにするマシンを判断する必" -"要のあるシャットダウンのときに当てはまります。" - -msgid "Set SELinux to allow the database server to run:" -msgstr "SELinux を設定して、データベースサーバーの実行を許可します。" - -msgid "Set a password for hacluster user **on each host**." -msgstr "**各ホストにおいて** hacluster ユーザーのパスワードを設定します。" - -msgid "Set automatic L3 agent failover for routers" -msgstr "ルーター向け L3 エージェントの自動フェイルオーバーの設定" - -msgid "Set basic cluster properties" -msgstr "基本的なクラスターのプロパティの設定" - -msgid "Set up Corosync with multicast" -msgstr "マルチキャストを使う場合の Corosync の設定" - -msgid "Set up Corosync with unicast" -msgstr "ユニキャストを使う場合の Corosync の設定" - -msgid "Set up Corosync with votequorum library" -msgstr "votequorum ライブラリーを使う場合の Corosync の設定" - -msgid "Set up the cluster with `crmsh`" -msgstr "`crmsh` を用いたクラスターのセットアップ" - -msgid "Set up the cluster with `pcs`" -msgstr "`pcs` を用いたセットアップ" - -msgid "" -"Setting ``last_man_standing`` to 1 enables the Last Man Standing (LMS) " -"feature; by default, it is disabled (set to 0). If a cluster is on the " -"quorum edge (``expected_votes:`` set to 7; ``online nodes:`` set to 4) for " -"longer than the time specified for the ``last_man_standing_window`` " -"parameter, the cluster can recalculate quorum and continue operating even if " -"the next node will be lost. This logic is repeated until the number of " -"online nodes in the cluster reaches 2. In order to allow the cluster to step " -"down from 2 members to only 1, the ``auto_tie_breaker`` parameter needs to " -"be set; this is not recommended for production environments." -msgstr "" -"``last_man_standing`` を 1 に設定することにより、Last Man Standing (LMS) 機能" -"を有効化できます。デフォルトで、無効化されています (0 に設定)。クラスターが、" -"``last_man_standing_window`` パラメーターに指定した時間より長く、クォーラム" -"エッジ (``expected_votes:`` が 7 に設定、 ``online nodes:`` が 4 に設定) にあ" -"る場合、クラスターはクォーラムを再計算して、次のノードが失われても動作を継続" -"します。この論理は、クラスターのオンラインノードが 2 になるまで繰り返されま" -"す。クラスターが 2 つのメンバーから 1 つだけに減ることを許可するために、 " -"``auto_tie_breaker`` パラメーターを設定する必要があります。これは本番環境では" -"推奨されません。" - -msgid "" -"Setting ``wait_for_all`` to 1 means that, When starting up a cluster (all " -"nodes down), the cluster quorum is held until all nodes are online and have " -"joined the cluster for the first time. This parameter is new in Corosync 2.0." 
-msgstr "" -"``wait_for_all`` を 1 に設定することは、クラスター起動 (全ノードダウン) 時、" -"クラスターのクォーラムは、すべてのノードがオンラインになり、まずクラスターに" -"参加するまで保持されることを意味しますこのパラメーターは Corosync 2.0 の新機" -"能です。" - -msgid "" -"Setting the ``pe-warn-series-max``, ``pe-input-series-max`` and ``pe-error-" -"series-max`` parameters to 1000 instructs Pacemaker to keep a longer history " -"of the inputs processed and errors and warnings generated by its Policy " -"Engine. This history is useful if you need to troubleshoot the cluster." -msgstr "" -"パラメーター ``pe-warn-series-max``, ``pe-input-series-max``, ``pe-error-" -"series-max`` を 1000 に設定することにより、Pacemaker が処理した入力履歴、ポリ" -"シーエンジンにより生成されたログと警告を保持するよう指定できます。この履歴" -"は、クラスターのトラブルシューティングを必要とする場合に役立ちます。" - -msgid "Simplified process for adding/removing of nodes" -msgstr "ノードの追加と削除を簡単化したプロセス" - -msgid "" -"Since the cluster is a single administrative domain, it is generally " -"accepted to use the same password on all nodes." -msgstr "" -"クラスターは単一の管理ドメインなので、一般的にすべてのノードで同じパスワード" -"を使用できます。" - -msgid "Single-controller high availability mode" -msgstr "シングルコントローラーの高可用性モード" - -msgid "" -"Specifying ``corosync_votequorum`` enables the votequorum library; this is " -"the only required option." -msgstr "" -"``corosync_votequorum`` を指定することにより、votequorum ライブラリーを有効化" -"します。これは唯一の必須オプションです。" - -msgid "Start Corosync" -msgstr "Corosync の開始" - -msgid "Start Pacemaker" -msgstr "Pacemaker の開始" - -msgid "" -"Start the ``xinetd`` daemon for ``clustercheck``. For servers that use " -"``init``, run the following commands:" -msgstr "" -"``clustercheck`` の ``xinetd`` デーモンを起動します。 ``init`` を使用するサー" -"バーの場合、以下のコマンドを実行します。" - -msgid "" -"Start the database server on all other cluster nodes. For servers that use " -"``init``, run the following command:" -msgstr "" -"すべての他のクラスターノードにおいてデータベースサーバーを起動します。" -"``init`` を使用するサーバーに対して、以下のコマンドを実行します。" - -msgid "" -"Start the message queue service on all nodes and configure it to start when " -"the system boots." -msgstr "" -"すべてのノードにおいてメッセージキューサービスを起動し、システム起動時に起動" -"するよう設定します。" - -msgid "" -"Starting up one instance of the service on several controller nodes, when " -"they can coexist and coordinate by other means. RPC in ``nova-conductor`` is " -"one example of this." -msgstr "" -"いくつかのコントローラノードで、一つのサービスインスタンスが開始します。それ" -"らは、ほかの意味で、共存、調和できるということであり、``nova-conductor``のRPC" -"はその例の一つです。" - -msgid "Stateful service" -msgstr "ステートフルサービス" - -msgid "Stateful services may be configured as active/passive or active/active:" -msgstr "" -"ステートフルサービスは、アクティブ/パッシブまたはアクティブ/アクティブとして" -"設定できます。" - -msgid "Stateless service" -msgstr "ステートレスサービス" - -msgid "Stateless vs. stateful services" -msgstr "ステートレスサービスとステートフルサービス" - -msgid "Storage" -msgstr "ストレージ" - -msgid "Storage back end" -msgstr "ストレージバックエンド" - -msgid "Storage components" -msgstr "ストレージ構成要素" - -msgid "TBA" -msgstr "TBA" - -msgid "" -"TCP generally holds on to hope for a long time. A ballpark estimate is " -"somewhere on the order of tens of minutes (30 minutes is commonly " -"referenced). During this time it will keep probing and trying to deliver the " -"data." -msgstr "" -"TCP は一般的に長く接続されています。概算として数十分 (一般的に 30 分として参" -"照されます) のレベルです。この間、プルーブして、データを配送しようとします。" - -msgid "Telemetry" -msgstr "Telemetry" - -msgid "Telemetry central agent" -msgstr "Telemetry 中央エージェント" - -msgid "" -"The :command:`crm configure` command supports batch input, so you may copy " -"and paste the above into your live Pacemaker configuration and then make " -"changes as required. 
For example, you may enter ``edit p_ip_glance-api`` "
-"from the :command:`crm configure` menu and edit the resource to match your "
-"preferred virtual IP address."
-msgstr ""
-":command:`crm configure` はバッチ入力をサポートします。そのため、現在の "
-"pacemaker 設定の中に上をコピー・ペーストし、適宜変更を反映できます。例えば、"
-"お好みの仮想 IP アドレスに一致させるために、:command:`crm configure` メニュー"
-"から ``edit p_ip_glance-api`` と入力し、リソースを編集できます。"
-
-msgid ""
-"The :command:`crm configure` supports batch input, so you may copy and paste "
-"the lines above into your live Pacemaker configuration and then make changes "
-"as required. For example, you may enter ``edit p_ip_manila-api`` from the :"
-"command:`crm configure` menu and edit the resource to match your preferred "
-"virtual IP address."
-msgstr ""
-":command:`crm configure` はバッチ入力をサポートします。そのため、現在の "
-"pacemaker 設定の中に上の行をコピー・ペーストし、適宜変更を反映できます。例え"
-"ば、お好みの仮想 IP アドレスに一致させるために、:command:`crm configure` メ"
-"ニューから ``edit p_ip_manila-api`` と入力し、リソースを編集できます。"
-
-msgid ""
-"The Galera cluster configuration directive ``backup`` indicates that two of "
-"the three controllers are standby nodes. This ensures that only one node "
-"services write requests because OpenStack support for multi-node writes is "
-"not yet production-ready."
-msgstr ""
-"この Galera cluster の設定ディレクティブ ``backup`` は、3 つのコントローラー"
-"の内 2 つがスタンバイノードであることを意味します。OpenStack のマルチノード書"
-"き込みサポートはまだ本番環境に対応していないため、これにより、確実に 1 つの"
-"ノードのみが書き込みリクエストを処理するようになります。"
-
-msgid ""
-"The Memcached client implements hashing to balance objects among the "
-"instances. Failure of an instance only impacts a percentage of the objects "
-"and the client automatically removes it from the list of instances. The SLA "
-"is several minutes."
-msgstr ""
-"Memcached クライアントは、インスタンス間でオブジェクトを分散するハッシュ機能"
-"を実装しています。インスタンスの障害は一部のオブジェクトのみに影響し、クライ"
-"アントは故障したインスタンスを一覧から自動的に削除します。SLA は数分です。"
-
-msgid ""
-"The OpenStack Image service offers a service for discovering, registering, "
-"and retrieving virtual machine images. To make the OpenStack Image API "
-"service highly available in active / passive mode, you must:"
-msgstr ""
-"OpenStack Image サービスは、仮想マシンイメージを検索、登録、取得するための"
-"サービスを提供します。OpenStack Image API サービスをアクティブ/パッシブモード"
-"で高可用性にするために、以下が必要になります。"
-
-msgid ""
-"The OpenStack Installation Guides also include a list of the services that "
-"use passwords with important notes about using them."
-msgstr ""
-"OpenStack インストールガイドは、パスワードを使用するサービスの一覧、それらを"
-"使用する上の重要な注意点もまとめてあります。"
-
-msgid ""
-"The OpenStack Networking service has a scheduler that lets you run multiple "
-"agents across nodes; the DHCP agent can be natively highly available. To "
-"configure the number of DHCP agents per network, modify the "
-"``dhcp_agents_per_network`` parameter in the :file:`/etc/neutron/neutron."
-"conf` file. By default this is set to 1. To achieve high availability, "
-"assign more than one DHCP agent per network."
-msgstr "" -"OpenStack Networking サービスには、ノードにまたがって複数のエージェントを実行" -"できるスケジューラーがあります。 DHCP エージェントは本質的に高可用性がありま" -"す。ネットワークあたりの DHCP エージェント数を設定するには、 file:`/etc/" -"neutron/neutron.conf` ファイルの``dhcp_agents_per_network`` パラメーターを変" -"更します。このパラメーターのデフォルト値は 1 です。高可用性を持たせるには、" -"ネットワークあたりの DHCP エージェント数を 1 以上にする必要があります。" - -msgid "The Pacemaker architecture" -msgstr "Pacemaker アーキテクチャー" - -msgid "" -"The Pacemaker service also requires an additional configuration file ``/etc/" -"corosync/uidgid.d/pacemaker`` to be created with the following content:" -msgstr "" -"Pacemaker サービスは、以下の内容で作成された、追加の設定ファイル ``/etc/" -"corosync/uidgid.d/pacemaker`` も必要とします。" - -msgid "" -"The Telemetry API service configuration does not have the ``option httpchk`` " -"directive as it cannot process this check properly. TODO: explain why the " -"Telemetry API is so special" -msgstr "" -"The Telemetry API service configuration does not have the ``option httpchk`` " -"directive as it cannot process this check properly. TODO: explain why the " -"Telemetry API is so special" - -msgid "" -"The Telemetry central agent can be configured to partition its polling " -"workload between multiple agents, enabling high availability." -msgstr "" -"Telemetry 中央エージェントは、高可用性を有効化した、複数のエージェント間で" -"ポーリングする負荷を分割するよう設定できます。" - -msgid "" -"The `Installation Guide `_ gives instructions for installing multiple compute nodes. To make " -"them highly available, you must configure the environment to include " -"multiple instances of the API and other services." -msgstr "" -"`インストールガイド `_ に" -"複数のコンピュートノードのインストール方法について記載されています。それらを" -"高可用性にするために、API と他のサービスの複数インスタンスなど、環境を設定す" -"る必要があります。" - -msgid "" -"The `Tooz `__ library provides the " -"coordination within the groups of service instances. It provides an API " -"above several back ends that can be used for building distributed " -"applications." -msgstr "" -"`Tooz `__ ライブラリーは、サービスインスタ" -"ンスのグループ内に条件を提供します。分散アプリケーションを構築するために使用" -"できる、いくつかのバックエンドに上の API を提供します。" - -msgid "" -"The ``admin_bind_host`` parameter lets you use a private network for admin " -"access." -msgstr "" -"``admin_bind_host`` パラメーターにより、管理アクセスのためのプライベートネッ" -"トワークを使用できます。" - -msgid "" -"The ``bindnetaddr`` is the network address of the interfaces to bind to. The " -"example uses two network addresses of /24 IPv4 subnets." -msgstr "" -"``bindnetaddr`` は、バインドするインターフェースのネットワークアドレスです。" -"この例は、2 つの /24 IPv4 サブネットを使用します。" - -msgid "" -"The ``token`` value specifies the time, in milliseconds, during which the " -"Corosync token is expected to be transmitted around the ring. When this " -"timeout expires, the token is declared lost, and after " -"``token_retransmits_before_loss_const lost`` tokens, the non-responding " -"processor (cluster node) is declared dead. In other words, ``token × " -"token_retransmits_before_loss_const`` is the maximum time a node is allowed " -"to not respond to cluster messages before being considered dead. The default " -"for token is 1000 milliseconds (1 second), with 4 allowed retransmits. These " -"defaults are intended to minimize failover times, but can cause frequent " -"\"false alarms\" and unintended failovers in case of short network " -"interruptions. The values used here are safer, albeit with slightly extended " -"failover times." 
-msgstr "" -"``token`` の値は、Corosync トークンがリング内を転送されることが予想される時間" -"をミリ秒単位で指定します。このタイムアウトを過ぎると、トークンが失われます。 " -"``token_retransmits_before_loss_const lost`` トークンの後、応答しないプロセッ" -"サー (クラスターノード) が停止していると宣言されます。言い換えると、 ``token " -"× token_retransmits_before_loss_const`` は、ノードが停止とみなされるまでに、" -"クラスターメッセージに応答しないことが許される最大時間です。トークン向けのデ" -"フォルトは、1000 ミリ秒 (1 秒)、4 回の再送許可です。これらのデフォルト値は、" -"フェイルオーバー時間を最小化することを意図していますが、頻繁な「誤検知」と短" -"いネットワーク中断による意図しないフェイルオーバーを引き起こす可能性がありま" -"す。ここで使用される値は、フェイルオーバー時間がわずかに長くなりますが、より" -"安全です。" - -msgid "" -"The ``transport`` directive controls the transport mechanism used. To avoid " -"the use of multicast entirely, specify the ``udpu`` unicast transport " -"parameter. This requires specifying the list of members in the ``nodelist`` " -"directive; this could potentially make up the membership before deployment. " -"The default is ``udp``. The transport type can also be set to ``udpu`` or " -"``iba``." -msgstr "" -"``transport`` ディレクティブは使用するトランスポートメカニズムを制御します。 " -"マルチキャストを完全に無効にするためには、``udpu`` ユニキャストトランスポート" -"パラメーターを指定します。``nodelist`` ディレクティブにメンバー一覧を指定する" -"必要があります。展開する前にメンバーシップを構成することができます。デフォル" -"トは ``udp`` です。トランスポート形式は ``udpu`` や ``iba`` に設定することも" -"できます。" - -msgid "" -"The application layer is controlled by the ``oslo.messaging`` configuration " -"options for multiple AMQP hosts. If the AMQP node fails, the application " -"reconnects to the next one configured within the specified reconnect " -"interval. The specified reconnect interval constitutes its SLA." -msgstr "" -"アプリケーション層は、複数 AMQP ホスト向けの ``oslo.messaging`` 設定オプショ" -"ンにより制御されます。AMQP ノードが故障したとき、アプリケーションが、指定され" -"た再接続間隔で、設定された次のノードに再接続します。" - -msgid "" -"The availability check of the instances is provided by heartbeat messages. " -"When the connection with an instance is lost, the workload will be " -"reassigned within the remained instances in the next polling cycle." -msgstr "" -"インスタンスの死活監視は、ハートビートメッセージによって提供されます。インス" -"タンスとの接続が失われた時、次のポーリングサイクルにて、ワークロードは、残っ" -"たインスタンスの中で再割り当てが行われます。" - -msgid "" -"The benefits of this approach are the physical isolation between components " -"and the ability to add capacity to specific components." -msgstr "" -"この方法の利点は、コンポーネント間の物理的な隔離、特定のコンポーネントへの" -"キャパシティーの追加です。" - -msgid "" -"The cloud controller runs on the management network and must talk to all " -"other services." -msgstr "" -"クラウドコントローラーは、管理ネットワークで動作し、他のすべてのサービスと通" -"信できる必要があります。" - -msgid "" -"The cluster is fully operational with ``expected_votes`` set to 7 nodes " -"(each node has 1 vote), quorum: 4. If a list of nodes is specified as " -"``nodelist``, the ``expected_votes`` value is ignored." -msgstr "" -"このクラスターは、7 ノード (各ノードが 1 つの投票権を持つ)、クォーラム 4 つに" -"設定した ``expected_votes`` で完全に動作します。ノードの一覧は ``nodelist`` " -"に指定された場合、 ``expected_votes`` の値は無視されます。" - -msgid "" -"The command :command:`crm configure` supports batch input, so you may copy " -"and paste the lines above into your live pacemaker configuration and then " -"make changes as required. For example, you may enter ``edit p_ip_cinder-" -"api`` from the :command:`crm configure` menu and edit the resource to match " -"your preferred virtual IP address." 
-msgstr "" -":command:`crm configure` コマンドはバッチ入力をサポートします。そのため、現在" -"の Pacemaker 設定の中に上の行をコピー・ペーストし、適宜変更を反映できます。例" -"えば、お好みの仮想 IP アドレスに一致させるために、:command:`crm configure` メ" -"ニューから ``edit p_ip_cinder-api`` と入力し、リソースを編集できます。" - -msgid "" -"The commands for installing RabbitMQ are specific to the Linux distribution " -"you are using:" -msgstr "" -"RabbitMQ のインストールコマンドは、使用している Linux ディストリビューション" -"により異なります。" - -msgid "" -"The common practice is to locate an HAProxy instance on each OpenStack " -"controller in the environment." -msgstr "" -"一般的なプラクティスは、環境内の各 OpenStack コントローラーに HAProxy インス" -"タンスを置くことです。" - -msgid "" -"The configuration uses static routing without Virtual Router Redundancy " -"Protocol (VRRP) or similar techniques implemented." -msgstr "" -"この設定は、Virtual Router Redundancy Protocol (VRRP) や類似技術を実装するこ" -"となく、静的ルーティングを使用します。" - -msgid "" -"The correct path to ``libgalera_smm.so`` given to the ``wsrep_provider`` " -"parameter." -msgstr "" -"``wsrep_provider`` パラメーターに指定された ``libgalera_smm.so`` への適切なパ" -"ス。" - -msgid "" -"The current design of the neutron LBaaS agent using the HAProxy driver does " -"not allow high availability for the tenant load balancers. The neutron-lbaas-" -"agent service will be enabled and running on all controllers, allowing for " -"load balancers to be distributed across all nodes. However, a controller " -"node failure will stop all load balancers running on that node until the " -"service is recovered or the load balancer is manually removed and created " -"again." -msgstr "" -"現在の HAProxy ドライバーを使用する neutron LBaaS エージェントは、テナントの" -"ロードバランサーの高可用性を実現できません。neutron-lbaas-agent サービスが有" -"効化され、すべてのコントローラーにおいて実行され、ロードバランサーがすべての" -"ノードにわたり分散されることを許可します。しかしながら、コントローラーノード" -"の障害は、サービスが復旧されるまで、またはロードバランサーが手動で削除され、" -"再び追加されるまで、そのノードで動作しているロードバランサーをすべて停止しま" -"す。" - -msgid "" -"The default node type is a disc node. In this guide, nodes join the cluster " -"as RAM nodes." -msgstr "" -"デフォルトのノード種別は disc ノードです。このガイドでは、ノードは RAM ノード" -"としてクラスターに参加します。" - -msgid "" -"The first step in setting up your highly-available OpenStack cluster is to " -"install the operating system on each node. Follow the instructions in the " -"OpenStack Installation Guides:" -msgstr "" -"高可用性 OpenStack クラスターをセットアップする第一歩は、各ノードにオペレー" -"ティングシステムをインストールすることです。OpenStack インストールガイドにあ" -"る手順に従ってください。" - -msgid "" -"The first step is to install the database that sits at the heart of the " -"cluster. To implement high availability, run an instance of the database on " -"each controller node and use Galera Cluster to provide replication between " -"them. Galera Cluster is a synchronous multi-master database cluster, based " -"on MySQL and the InnoDB storage engine. It is a high-availability service " -"that provides high system uptime, no data loss, and scalability for growth." 
-msgstr "" -"最初の手順は、クラスターの中心になるデータベースをインストールすることです。" -"高可用性を実現するために、各コントローラーノードにおいてデータベースを実行" -"し、ノード間でレプリケーションできる Galera Cluster を使用します。Galera " -"Cluster は、MySQL と InnoDB ストレージエンジンをベースにした、同期型のマルチ" -"マスターデータベースクラスターです。高いシステム稼働時間、データ損失なし、ス" -"ケーラビリティーを提供する、高可用性サービスです。" - -msgid "" -"The following components are currently unable to benefit from the use of a " -"proxy server:" -msgstr "" -"以下のコンポーネントは、現在、プロキシサーバーの利用による利点はありません。" - -msgid "The following components/services can work with HA queues:" -msgstr "以下のコンポーネントやサービスは、HA キューを用いて動作できます。" - -msgid "" -"The following diagram shows a very simplified view of the different " -"strategies used to achieve high availability for the OpenStack services:" -msgstr "" -"以下の図は、OpenStack サービスの高可用性を達成するために使用される、さまざま" -"な方法を非常に簡略化した図を表します。" - -msgid "The keepalived architecture" -msgstr "keepalived アーキテクチャー" - -msgid "" -"The most popular AMQP implementation used in OpenStack installations is " -"RabbitMQ." -msgstr "" -"OpenStack 環境に使用される最も一般的な AMQP ソフトウェアは RabbitMQ です。" - -msgid "" -"The neutron L3 agent is scalable, due to the scheduler that supports Virtual " -"Router Redundancy Protocol (VRRP) to distribute virtual routers across " -"multiple nodes. To enable high availability for configured routers, edit " -"the :file:`/etc/neutron/neutron.conf` file to set the following values:" -msgstr "" -"neutron L3 エージェントは、スケーラブルです。複数のノードにわたり仮想ルーター" -"を分散するために、スケジューラーが Virtual Router Redundancy Protocol (VRRP) " -"をサポートするためです。設定済みのルーターを高可用化するために、 :file:`/etc/" -"neutron/neutron.conf` ファイルを編集して、以下の値を設定します。" - -msgid "" -"The other is optimized for Active/Active services that do not require any " -"inter-machine coordination. In this setup, services are started by your init " -"system (systemd in most modern distributions) and a tool is used to move IP " -"addresses between the hosts. The most common package for doing this is " -"keepalived." -msgstr "" -"他には、マシン間の調整を必要としないアクティブ/アクティブなサービスに最適化さ" -"れています。このセットアップでは、サービスが init システム (最近のディストリ" -"ビューションは systemd) により起動され、ツールがホスト間で IP アドレスを移動" -"するために使用されます。これを実行するための最も一般的なパッケージは " -"keepalived です。" - -msgid "" -"The service declaration for the pacemaker service may be placed in the :file:" -"`corosync.conf` file directly or in its own separate file, :file:`/etc/" -"corosync/service.d/pacemaker`." -msgstr "" -"Pacemaker サービスに関するサービス定義は、直接 :file:`corosync.conf` ファイル" -"にあるか、単独ファイル :file:`/etc/corosync/service.d/pacemaker` にある可能性" -"があります。" - -msgid "" -"The source address for the connection from HAProxy back to the client is the " -"VIP address. However the VIP address is no longer present on the host. This " -"means that the network (IP) layer deems the packet unroutable, and informs " -"the transport (TCP) layer. TCP, however, is a reliable transport. It knows " -"how to handle transient errors and will retry. And so it does." -msgstr "" -"HAProxy プロキシーからクライアントに戻る接続の送信元アドレスは、仮想 IP アド" -"レスになります。しかしながら、仮想 IP アドレスはすでにホストに存在しません。" -"つまり、ネットワーク (IP) 層はパケットをルーティングできないと判断して、トラ" -"ンスポート (TCP) 層に通知します。しかしながら、TCP は信頼できる転送になりま" -"す。一時的なエラーを処理して、再試行する方法がわかっているからです。また、実" -"際にそうします。" - -msgid "The standard hardware requirements:" -msgstr "標準的なハードウェア要件:" - -msgid "The steps to implement the Pacemaker cluster stack are:" -msgstr "Pacemaker クラスタースタックを実行する手順は、次のとおりです。" - -msgid "" -"The votequorum library has been created to replace and eliminate qdisk, the " -"disk-based quorum daemon for CMAN, from advanced cluster configurations." 
-msgstr "" -"votequorum ライブラリーは、高度なクラスター設定により、qdisk、CMAN 向けディス" -"クベースのクォーラムデーモンを置き換えて除去するために作成されます。" - -msgid "" -"The votequorum library is part of the corosync project. It provides an " -"interface to the vote-based quorum service and it must be explicitly enabled " -"in the Corosync configuration file. The main role of votequorum library is " -"to avoid split-brain situations, but it also provides a mechanism to:" -msgstr "" -"votequorum ライブラリーは corosync プロジェクトの一部です。投票ベースのクォー" -"ラムサービスへのインターフェースを提供し、Corosync 設定ファイルにおいて明示的" -"に有効化する必要があります。votequorum ライブラリーのおもな役割は、スプリット" -"ブレイン状態を避けるためですが、以下の機能も提供します。" - -msgid "" -"There are known issues with cinder-volume that recommend setting it as " -"active-passive for now, see: https://blueprints.launchpad.net/cinder/+spec/" -"cinder-volume-active-active-support" -msgstr "" -"今のところ、cinder-volume に既知の問題があり、アクティブ/パッシブとして設定す" -"ることを推奨します。https://blueprints.launchpad.net/cinder/+spec/cinder-" -"volume-active-active-support を参照してください。" - -msgid "There are primarily two HA architectures in use today." -msgstr "今日使用される主要な HA アーキテクチャーは 2 つあります。" - -msgid "" -"There are three implementations of Galera Cluster: MySQL, MariaDB and " -"Percona XtraDB. For each implementation, there is a software repository that " -"provides binary packages for Debian, Red Hat, and SUSE-based Linux " -"distributions." -msgstr "" -"Galera Cluster の実装が 3 種類あります。MySQL、MariaDB、Percona XtraDB です。" -"それぞれ、Debian 系、Red Hat 系、SUSE 系の Linux ディストリビューション向けの" -"バイナリーパッケージを提供するソフトウェアリポジトリーがあります。" - -msgid "" -"These agents must conform to one of the `OCF `_, `SysV Init " -"`_, Upstart, or Systemd standards." -msgstr "" -"これらのエージェントは、 `OCF `_, `SysV Init `_, Upstart, Systemd 標準に従う必要があります。" - -msgid "" -"This architecture has some inherent limitations that should be kept in mind " -"during deployment and daily operations. The following sections describe " -"these limitations." -msgstr "" -"このアーキテクチャーは、いくつかの本来的な制約を持ちます。導入や日々の運用に" -"おいて心に留めておく必要があります。以下のセクションは、これらの制限について" -"記載します。" - -msgid "" -"This configuration creates ``p_cinder-api``, a resource for managing the " -"Block Storage API service." -msgstr "" -"この設定は Block Storage API サービスを管理するためのリソース ``p_cinder-" -"api`` を作成します。" - -msgid "" -"This configuration creates ``p_glance-api``, a resource for managing the " -"OpenStack Image API service." -msgstr "" -"この設定は ``p_glance-api`` を作成します。これは OpenStack Image API サービス" -"を管理するリソースです。" - -msgid "" -"This configuration creates ``p_keystone``, a resource for managing the " -"OpenStack Identity service." -msgstr "" -"この設定は OpenStack Identity サービスを管理するためのリソース " -"``p_keystone`` を作成します。" - -msgid "" -"This configuration creates ``p_manila-api``, a resource for managing the " -"Shared File Systems API service." -msgstr "" -"この設定は Shared File Systems API サービスを管理するためのリソース " -"``p_manila-api`` を作成します。" - -msgid "" -"This configuration creates ``vip``, a virtual IP address for use by the API " -"node (``10.0.0.11``):" -msgstr "" -"この設定は、API ノード (``10.0.0.11``) により使用される仮想 IP アドレス " -"``vip`` を作成します。" - -msgid "" -"This example assumes that you are using NFS for the physical storage, which " -"will almost never be true in a production installation." -msgstr "" -"この例は、物理ストレージに NFS を使用していることを仮定します。これは、ほとん" -"どの本番環境のインストールにおいて正しくありません。" - -msgid "" -"This guide describes how to install and configure OpenStack for high " -"availability. It supplements the OpenStack Installation Guides and assumes " -"that you are familiar with the material in those guides." 
-msgstr ""
-"このガイドでは、OpenStack に高可用性を持たせるにはどのようにインストールと設"
-"定を行うかを説明します。 OpenStack インストールガイドを補完する位置付けであ"
-"り、インストールガイドの内容を前提に書かれています。"
-
-msgid ""
-"This guide documents OpenStack Liberty, OpenStack Kilo, and OpenStack Juno "
-"releases."
-msgstr ""
-"このガイドでは、OpenStack Liberty, OpenStack Kilo, OpenStack Juno のリリー"
-"スを対象としています。"
-
-msgid ""
-"This guide is a work-in-progress and changing rapidly while we continue to "
-"test and enhance the guidance. Please note where there are open \"to do\" "
-"items and help where you are able."
-msgstr ""
-"このガイドは、作成中であり、頻繁に変更されています。テストと内容の改善を継続"
-"しています。「To Do」項目が残っていますので、手伝える部分があれば手伝ってくだ"
-"さい。"
-
-msgid "This guide uses the following example IP addresses:"
-msgstr "このガイドは、以下の IP アドレス例を使用します。"
-
-msgid "This is the most common option and the one we document here."
-msgstr "これは最も一般的なオプションで、ここにドキュメント化します。"
-
-msgid ""
-"This is why setting the quorum to a value less than floor(n/2) + 1 is "
-"dangerous. However it may be required for some specific cases, like a "
-"temporary measure at a point it is known with 100% certainty that the other "
-"nodes are down."
-msgstr ""
-"これがクォーラムの値を floor(n/2) + 1 より小さく設定することが危険な理由で"
-"す。しかしながら、いくつかの特別な場合に必要となる可能性があります。例えば、"
-"他のノードが 100% 確実に停止していることがわかっている場合の一時的な措置など"
-"です。"
-
-msgid ""
-"This makes the instances of HAProxy act independently and fail over "
-"transparently together with the network endpoints (VIP addresses) failover "
-"and, therefore, shares the same SLA."
-msgstr ""
-"HAProxy のインスタンスが独立して動作して、ネットワークエンドポイント (仮想 "
-"IP アドレス) のフェールオーバーと一緒に透過的にフェールオーバーするため、同"
-"じ SLA を共有します。"
-
-msgid ""
-"This scenario can be visualized as below, where each box below represents a "
-"cluster of three or more guests."
-msgstr ""
-"このシナリオは、以下のように可視化できます。以下の各ボックスは 3 つ以上のゲス"
-"トのクラスターを表します。"
-
-msgid "This scenario can be visualized as below."
-msgstr "このシナリオは以下のように可視化できます。"
-
-msgid ""
-"This scenario has the advantage of requiring far fewer, if more powerful, "
-"machines. Additionally, being part of a single cluster allows us to "
-"accurately model the ordering dependencies between components."
-msgstr ""
-"このシナリオは、より高性能ならば、より少ないマシンを必要とする利点がありま"
-"す。加えて、シングルクラスターの一部になることにより、コンポーネント間の順序"
-"依存関係を正確にモデル化できます。"
-
-msgid ""
-"This section assumes that you are familiar with the `documentation `_ for "
-"installing the OpenStack Image API service."
-msgstr ""
-"このセクションは、OpenStack Image API サービスのインストールに関する `ドキュ"
-"メント `_ に慣れていることを仮定しています。"
-
-msgid ""
-"This section discusses ways to protect against data loss in your OpenStack "
-"environment."
-msgstr ""
-"このセクションは、お使いの OpenStack 環境におけるデータ損失から保護する方法を"
-"議論します。"
-
-msgid ""
-"This value increments with each transaction, so the most advanced node has "
-"the highest sequence number, and therefore is the most up to date."
-msgstr "" -"この値は各トランザクションによりインクリメントされます。ほとんどの高度なノー" -"ドは、最大のシーケンス番号を持つため、ほとんど最新です。" - -msgid "" -"To be sure that all data is highly available, ensure that everything is " -"stored in the MySQL database (which is also highly available):" -msgstr "" -"すべてのものを (高可用性) MySQL データベースに保存して、すべてのデータが高可" -"用性になっていることを確認します。" - -msgid "" -"To configure AppArmor to work with Galera Cluster, complete the following " -"steps on each cluster node:" -msgstr "" -"各クラスターノードにおいて以下の手順を実行して、Galera Cluster を正常に動作さ" -"せるために AppArmor を設定します。" - -msgid "" -"To configure SELinux to permit Galera Cluster to operate, complete the " -"following steps on each cluster node:" -msgstr "" -"各クラスターノードにおいて以下の手順を実行して、Galera Cluster の動作を許可す" -"るために SELinux を設定します。" - -msgid "" -"To do so, stop RabbitMQ everywhere and copy the cookie from the first node " -"to each of the other node(s):" -msgstr "" -"そうするために、すべての場所で RabbitMQ を停止して、1 番目のノードのクッキー" -"を他のノードにコピーします。" - -msgid "" -"To ensure that all queues except those with auto-generated names are " -"mirrored across all running nodes, set the ``ha-mode`` policy key to all by " -"running the following command on one of the nodes:" -msgstr "" -"自動生成された名前を持つキューを除いて、すべてのキューがすべての動作中のノー" -"ドで確実にミラーするために、以下のコマンドをどこかのノードで実行して、 ``ha-" -"mode`` ポリシーキーを all に設定します。" - -msgid "" -"To find the most advanced cluster node, you need to check the sequence " -"numbers, or seqnos, on the last committed transaction for each. You can find " -"this by viewing ``grastate.dat`` file in database directory," -msgstr "" -"最も高度なクラスターノードを見つけるために、各ノードの最新コミットのトランザ" -"クションにあるシーケンス番号を確認する必要があります。データベースディレクト" -"リーにある ``grastate.dat`` ファイルを表示すると、これを見つけられます。" - -msgid "" -"To implement any changes made to this you must restart the HAProxy service" -msgstr "" -"これの変更を反映するために、HAProxy サービスを再起動する必要があります。" - -msgid "" -"To install and configure memcached, read the `official documentation " -"`_." -msgstr "" -"memcached をインストールして設定する方法は、 `公式ドキュメント `_ を参照してください。" - -msgid "" -"To make this configuration persistent, repeat the above commands with the :" -"option:`--permanent` option." -msgstr "" -":option:`--permanent` オプションを付けて上のコマンドを繰り返して、この設定を" -"永続化します。" - -msgid "To start the cluster, complete the following steps:" -msgstr "以下の手順を実行して、クラスターを起動します。" - -msgid "To verify the cluster status:" -msgstr "クラスターの状態を確認する方法:" - -msgid "" -"Tooz supports `various drivers `__ including the following back end solutions:" -msgstr "" -"Tooz は、以下のバックエンドソリューションを含む、 `さまざまなドライバー " -"`__ をサポートします。" - -msgid "True" -msgstr "True (真)" - -msgid "" -"Typically, an active/active installation for a stateless service maintains a " -"redundant instance, and requests are load balanced using a virtual IP " -"address and a load balancer such as HAProxy." -msgstr "" -"一般的にステートレスサービスをアクティブ / アクティブにインストールすると、冗" -"長なインスタンスを維持することになります。リクエストは HAProxy のような仮想 " -"IP アドレスとロードバランサーを使用して負荷分散されます。" - -msgid "Ubuntu, Debian" -msgstr "Ubuntu, Debian" - -msgid "Update the local cache." -msgstr "ローカルキャッシュを更新します。" - -msgid "Use HA queues in RabbitMQ (x-ha-policy: all):" -msgstr "RabbitMQ における HA キューの使用 (x-ha-policy: all):" - -msgid "" -"Use MySQL/Galera in active/passive mode to avoid deadlocks on ``SELECT ... " -"FOR UPDATE`` type queries (used, for example, by nova and neutron). This " -"issue is discussed more in the following:" -msgstr "" -"MySQL/Galera をアクティブ/パッシブモードで使用して、 ``SELECT ... 
FOR " -"UPDATE`` のような形式のクエリーにおけるデッドロックを避けます (例えば、nova " -"や neutron により使用されます)。この問題は、以下において詳細に議論されていま" -"す。" - -msgid "Use durable queues in RabbitMQ:" -msgstr "RabbitMQ での永続キューの使用:" - -msgid "" -"Use that password to authenticate to the nodes which will make up the " -"cluster. The :option:`-p` option is used to give the password on command " -"line and makes it easier to script." -msgstr "" -"このパスワードを使用して、クラスターを構成するノードに認証します。 :option:`-" -"p` オプションは、コマンドラインにおいてパスワードを指定して、スクリプト化しや" -"すくするために使用されます。" - -msgid "" -"Use the :command:`corosync-cfgtool` utility with the :option:`-s` option to " -"get a summary of the health of the communication rings:" -msgstr "" -":command:`corosync-cfgtool` ユーティリティーに :option:`-s` オプションを付け" -"て実行して、コミュニケーションリングの稼働状態の概要を取得します。" - -msgid "" -"Use the :command:`corosync-objctl` utility to dump the Corosync cluster " -"member list:" -msgstr "" -":command:`corosync-objctl` ユーティリティーを使用して、Corosync クラスターの" -"メンバー一覧を出力します。" - -msgid "" -"Using Galera Cluster requires that you install two packages. The first is " -"the database server, which must include the wsrep API patch. The second " -"package is the Galera Replication Plugin, which enables the write-set " -"replication service functionality with the database server." -msgstr "" -"Galera Cluster を使用するために 2 つのパッケージをインストールする必要があり" -"ます。1 つ目はデータベースサーバーです。wsrep API パッチを含める必要がありま" -"す。2 つ目のパッケージは Galera Replication Plugin です。データベースサーバー" -"の書き込みセットレプリケーションサービス機能を有効にします。" - -msgid "Using the ``semanage`` utility, open the relevant ports:" -msgstr "``semanage`` ユーティリティーを使用して、関連するポートを開きます。" - -msgid "Value" -msgstr "値" - -msgid "Verify that the nodes are running:" -msgstr "そのノードが動作していることを検証します。" - -msgid "" -"We are building a cluster of RabbitMQ nodes to construct a RabbitMQ broker, " -"which is a logical grouping of several Erlang nodes." -msgstr "" -"RabbitMQ ブローカーを構成する RabbitMQ ノードのクラスターを構築しています。こ" -"れは、いくつかの Erlang ノードの論理グループです。" - -msgid "" -"We have to configure the OpenStack components to use at least two RabbitMQ " -"nodes." -msgstr "" -"2 つ以上の RabbitMQ ノードを使用するよう、OpenStack のコンポーネントを設定す" -"る必要があります。" - -msgid "" -"We recommend HAProxy as the load balancer, however, there are many " -"alternatives in the marketplace." -msgstr "" -"ロードバランサーとして HAProxy を推奨しますが、マーケットプレースにさまざまな" -"同等品があります。" - -msgid "" -"We use a check interval of 1 second, however, the timeouts vary by service." -msgstr "1 秒間隔でチェックしますが、タイムアウト値はサービスにより異なります。" - -msgid "What is a cluster manager" -msgstr "クラスターマネージャーとは" - -msgid "" -"When Ceph RBD is used for ephemeral volumes as well as block and image " -"storage, it supports `live migration `_ of VMs with ephemeral drives; LVM " -"only supports live migration of volume-backed VMs." -msgstr "" -"Ceph RBD をブロックストレージやイメージストレージと同じように一時ストレージ用" -"に使用する場合、一時ボリュームを持つ仮想マシンの `ライブマイグレーション " -"` がサポートされます。LVM のみがボリュームをバックエンドとした仮想マシン" -"のライブマイグレーションをサポートします。" - -msgid "" -"When configuring an OpenStack environment for study or demonstration " -"purposes, it is possible to turn off the quorum checking; this is discussed " -"later in this guide. Production systems should always run with quorum " -"enabled." -msgstr "" -"学習やデモの目的に OpenStack 環境を設定している場合、クォーラムのチェックを無" -"効化できます。このガイドで後から議論します。本番システムは必ずクォーラムを有" -"効化して実行すべきです。" - -msgid "" -"When each cluster node starts, it checks the IP addresses given to the " -"``wsrep_cluster_address`` parameter and attempts to establish network " -"connectivity with a database server running there. 
Once it establishes a " -"connection, it attempts to join the Primary Component, requesting a state " -"transfer as needed to bring itself into sync with the cluster." -msgstr "" -"各クラスターノードが起動したとき、``wsrep_cluster_address`` パラメーターに指" -"定された IP アドレスを確認して、それで動作しているデータベースサーバーへの" -"ネットワーク接続性を確立しようとします。接続が確立されると、クラスターを同期" -"するために必要となる状態転送を要求する、Primary Component に参加しようとしま" -"す。" - -msgid "" -"When installing highly-available OpenStack on VMs, be sure that your " -"hypervisor permits promiscuous mode and disables MAC address filtering on " -"the external network." -msgstr "" -"仮想マシン上に高可用性 OpenStack をインストールする場合、ハイパーバイザーが外" -"部ネットワークにおいてプロミスキャスモードを許可して、MAC アドレスフィルタリ" -"ングを無効化していることを確認してください。" - -msgid "" -"When you find the correct path, run the :command:`iptables-save` command:" -msgstr "" -"適切なパスを見つけたとき、 :command:`iptables-save` コマンドを実行します。" - -msgid "" -"When you finish enabling the software repository for Galera Cluster, you can " -"install it using your package manager. The particular command and packages " -"you need to install varies depending on which database server you want to " -"install and which Linux distribution you use:" -msgstr "" -"Galera Cluster のソフトウェアリポジトリーを有効化すると、パッケージマネー" -"ジャーを使用してインストールできます。インストールに必要となる具体的なコマン" -"ドやパッケージは、インストールしたいデータベースサーバーと使用する Linux ディ" -"ストリビューションにより異なります。" - -msgid "" -"When you finish the installation and configuration process on each cluster " -"node in your OpenStack database, you can initialize Galera Cluster." -msgstr "" -"各ノードにおいて、お使いの OpenStack データベースのインストールと設定を完了す" -"ると、Galera Cluster を初期化できます。" - -msgid "" -"When you have all cluster nodes started, log into the database client on one " -"of them and check the ``wsrep_cluster_size`` status variable again." -msgstr "" -"すべてのクラスターノードを起動したとき、どれか 1 つにデータベースクライアント" -"からログインして、``wsrep_cluster_size`` 状態変数を再び確認します。" - -msgid "" -"While all of the configuration parameters available to the standard MySQL, " -"MariaDB or Percona XtraDB database server are available in Galera Cluster, " -"there are some that you must define an outset to avoid conflict or " -"unexpected behavior." -msgstr "" -"標準的な MySQL、MariaDB、Percona XtraDB データベースに利用できる設定パラメー" -"ターは Galera Cluster で利用できますが、競合や予期しない動作を避けるために始" -"めに定義する必要があるものがあります。" - -msgid "" -"While the application can still run after the failure of several instances, " -"it may not have sufficient capacity to serve the required volume of " -"requests. A cluster can automatically recover failed instances to prevent " -"additional load induced failures." -msgstr "" -"アプリケーションは、いくつかのインスタンスが故障した後も動作できますが、要求" -"されたリクエスト量を処理するための十分な容量がないかもしれません。クラスター" -"は自動的に故障したインスタンスを復旧して、さらなる負荷が障害を引き起こさない" -"ようにできます。" - -msgid "" -"While there will be multiple neutron LBaaS agents running, each agent will " -"manage a set of load balancers, that cannot be failed over to another node." -msgstr "" -"複数の neutron LBaaS エージェントが動作していますが、各エージェントは 1 組の" -"ロードバランサーを管理し、他のノードにフェールオーバーできません。" - -msgid "" -"With ``secauth`` enabled, Corosync nodes mutually authenticate using a 128-" -"byte shared secret stored in the :file:`/etc/corosync/authkey` file, which " -"may be generated with the :command:`corosync-keygen` utility. When using " -"``secauth``, cluster communications are also encrypted." 
-msgstr "" -"``secauth`` を有効化すると、Corosync ノードが :file:`/etc/corosync/authkey` " -"に保存された 128 バイトの共有シークレットを使用して相互に認証されます。これ" -"は、 :command:`corosync-keygen` ユーティリティーを使用して生成できます。 " -"``secauth`` を使用している場合、クラスター通信も暗号化されます。" - -msgid "" -"With the firewall configuration saved, whenever your OpenStack database " -"starts." -msgstr "" -"ファイアウォール設定を保存すると、OpenStack データベースを起動するときいつで" -"も。" - -msgid "With these options set, SELinux now permits Galera Cluster to operate." -msgstr "" -"これらのオプションを設定すると、SELinux により Galera Cluster の動作を許可さ" -"れます。" - -msgid "" -"Within the ``nodelist`` directive, it is possible to specify specific " -"information about the nodes in the cluster. The directive can contain only " -"the node sub-directive, which specifies every node that should be a member " -"of the membership, and where non-default options are needed. Every node must " -"have at least the ``ring0_addr`` field filled." -msgstr "" -"``nodelist`` ディレクティブに、クラスター内のノードに関する具体的な情報を指定" -"できます。このディレクティブは、node サブディレクティブのみを含められます。こ" -"れは、メンバーシップのすべてのメンバーを指定し、デフォルト以外に必要となるオ" -"プションを指定します。すべてのノードは、少なくとも ``ring0_addr`` の項目を入" -"力する必要があります。" - -msgid "" -"Without the ``backend_url`` option being set only one instance of both the " -"central and compute agent service is able to run and function correctly." -msgstr "" -"``backend_url`` オプションを設定しないと、中央エージェントとコンピュートエー" -"ジェントのインスタンスのどちらかのみが正しく動作して機能できます。" - -msgid "" -"You also need to create the OpenStack Identity Endpoint with this IP address." -msgstr "" -"この IP アドレスを用いて OpenStack Identity エンドポイントを作成する必要があ" -"ります。" - -msgid "" -"You can achieve high availability for the OpenStack database in many " -"different ways, depending on the type of database that you want to use. " -"There are three implementations of Galera Cluster available to you:" -msgstr "" -"使用したいデータベースの種類に応じて、さまざまな情報で OpenStack のデータベー" -"スの高可用性を実現できます。Galera Cluster は 3 種類の実装があります。" - -msgid "" -"You can alternatively use a commercial load balancer, which is a hardware or " -"software. A hardware load balancer generally has good performance." -msgstr "" -"代わりに、ハードウェアやソフトウェアの商用ロードバランサーを使用することもで" -"きます。ハードウェアロードバランサーは、一般的に高性能です。" - -msgid "" -"You can have up to 16 cluster members (this is currently limited by the " -"ability of corosync to scale higher). In extreme cases, 32 and even up to 64 " -"nodes could be possible, however, this is not well tested." -msgstr "" -"クラスターのメンバーを 16 まで持てます (これは、corosync をよりスケールさせる" -"機能による、現在の制限です)。極端な場合、32 や 64 までのノードさえ利用できま" -"すが、十分にテストされていません。" - -msgid "" -"You can now add the Pacemaker configuration for Block Storage API resource. " -"Connect to the Pacemaker cluster with the :command:`crm configure` command " -"and add the following cluster resources:" -msgstr "" -"Block Storage API リソース用の Pacemaker 設定を追加できます。 :command:`crm " -"configure` を用いて Pacemaker クラスターに接続し、以下のクラスターリソースを" -"追加します。" - -msgid "" -"You can now add the Pacemaker configuration for the OpenStack Identity " -"resource by running the :command:`crm configure` command to connect to the " -"Pacemaker cluster. Add the following cluster resources:" -msgstr "" -"ここで OpenStack Identity リソース向けに Pacemaker の設定を追加できます。:" -"command:`crm configure` コマンドを使用して、Pacemaker クラスターに接続しま" -"す。以下のクラスターリソースを追加します。" - -msgid "" -"You can now add the Pacemaker configuration for the OpenStack Image API " -"resource. 
Use the :command:`crm configure` command to connect to the " -"Pacemaker cluster and add the following cluster resources:" -msgstr "" -"ここで OpenStack Image API リソース向けに Pacemaker の設定を追加できます。:" -"command:`crm configure` コマンドを使用して、Pacemaker クラスターに接続して、" -"以下のクラスターリソースを追加します。" - -msgid "" -"You can now add the Pacemaker configuration for the Shared File Systems API " -"resource. Connect to the Pacemaker cluster with the :command:`crm configure` " -"command and add the following cluster resources:" -msgstr "" -"Shared File Systems API リソース用の Pacemaker 設定を追加できます。 :command:" -"`crm configure` を用いて Pacemaker クラスターに接続し、以下のクラスターリソー" -"スを追加します。" - -msgid "You can now check the Corosync connectivity with two tools." -msgstr "2 つのツールを用いて Corosync 接続性を確認できます。" - -msgid "" -"You can read more about these concerns on the `Red Hat Bugzilla `_ and there is a `psuedo " -"roadmap `_ " -"for addressing them upstream." -msgstr "" -"これらの課題の詳細は `Red Hat Bugzilla `_ にあります。また、アップストリームにおいて解決するための " -"`psuedo roadmap `_ があります。" - -msgid "" -"You must also create the OpenStack Image API endpoint with this IP address. " -"If you are using both private and public IP addresses, you should create two " -"virtual IP addresses and define your endpoint like this:" -msgstr "" -"この IP アドレスを用いて OpenStack Image API エンドポイントを作成する必要があ" -"ります。プライベート IP アドレスとパブリック IP アドレスを両方使用している場" -"合、2 つの仮想 IP アドレスを作成して、次のようにエンドポイントを定義する必要" -"があります。" - -msgid "" -"You must configure NTP to properly synchronize services among nodes. We " -"recommend that you configure the controller node to reference more accurate " -"(lower stratum) servers and other nodes to reference the controller node. " -"For more information, see the `Install Guides `_." -msgstr "" -"サービスをノード間で正しく同期するために、NTP を設定する必要があります。コン" -"トローラーノードをできる限り正確な(ストラタム値が小さい)サーバーに参照する" -"ように設定し、他のノードはコントローラーノードを参照するよう設定することを推" -"奨します。詳細は `Install Guides `_ を参照してください。" - -msgid "" -"You must configure a supported Tooz driver for the HA deployment of the " -"Telemetry services." -msgstr "" -"Telemetry サービスの高可用性デプロイのために、サポートされる Tooz ドライバー" -"を設定する必要があります。" - -msgid "You must create the Block Storage API endpoint with this IP." -msgstr "" -"この IP を用いて Block Storage API エンドポイントを作成する必要があります。" - -msgid "You must create the Shared File Systems API endpoint with this IP." -msgstr "" -"この IP を用いて Shared File Systems API エンドポイントを作成する必要がありま" -"す。" - -msgid "" -"You must first download the OpenStack Identity resource to Pacemaker by " -"running the following commands:" -msgstr "" -"まず、以下のコマンドを実行して、OpenStack Identity リソースを Pacemaker にダ" -"ウンロードする必要があります。" - -msgid "You must first download the resource agent to your system:" -msgstr "" -"まず、お使いのシステムにリソースエージェントをダウンロードする必要がありま" -"す。" - -msgid "" -"You must select and assign a virtual IP address (VIP) that can freely float " -"between cluster nodes." -msgstr "" -"クラスターノード間で自由に移動できる仮想 IP アドレス (VIP) を選択して割り当て" -"る必要があります。" - -msgid "" -"You must use the same name on every cluster node. The connection fails when " -"this value does not match." -msgstr "" -"すべてのクラスターノードにおいて同じ名前を使用する必要があります。この値が一" -"致しない場合、接続が失敗します。" - -msgid "" -"You only need to do this on one cluster node. Galera Cluster replicates the " -"user to all the others." -msgstr "" -"どれか 1 つのクラスターノードにおいてのみ実行する必要があります。Galera " -"Cluster が、他のすべてのノードにユーザーを複製します。" - -msgid "" -"You should see a ``status=joined`` entry for each of your constituent " -"cluster nodes." 
-msgstr "" -"構成している各クラスターノードが ``status=joined`` になっているはずです。" - -msgid "" -"You would choose this option if you prefer to have fewer but more powerful " -"boxes." -msgstr "より少数の高性能なマシンを好む場合、この選択肢を選択するでしょう。" - -msgid "" -"You would choose this option if you prefer to have more but less powerful " -"boxes." -msgstr "より多数の低性能なマシンを好む場合、この選択肢を選択するでしょう。" - -msgid "" -"Your OpenStack services must now point their Block Storage API configuration " -"to the highly available, virtual cluster IP address rather than a Block " -"Storage API server’s physical IP address as you would for a non-HA " -"environment." -msgstr "" -"OpenStack サービスは、非 HA 環境と同じように Block Storage API サーバーの物" -"理 IP アドレスを指定する代わりに、Block Storage API の設定が高可用性と仮想ク" -"ラスター IP アドレスを指し示す必要があります。" - -msgid "" -"Your OpenStack services must now point their OpenStack Identity " -"configuration to the highly available virtual cluster IP address rather than " -"point to the physical IP address of an OpenStack Identity server as you " -"would do in a non-HA environment." -msgstr "" -"OpenStack サービスが、非 HA 環境であるような OpenStack Identity サーバーの物" -"理 IP アドレスを指し示す代わりに、高可用性な仮想クラスター IP アドレスを指し" -"示すように、それらの OpenStack Identity の設定を変更する必要があります。" - -msgid "" -"Your OpenStack services must now point their OpenStack Image API " -"configuration to the highly available, virtual cluster IP address instead of " -"pointint to the physical IP address of an OpenStack Image API server as you " -"would in a non-HA cluster." -msgstr "" -"OpenStack サービスが、非 HA クラスターであるような OpenStack Image API サー" -"バーの物理 IP アドレスを指し示す代わりに、高可用性な仮想クラスター IP アドレ" -"スを指し示すように、それらの OpenStack Image API の設定を変更する必要がありま" -"す。" - -msgid "" -"Your OpenStack services must now point their Shared File Systems API " -"configuration to the highly available, virtual cluster IP address rather " -"than a Shared File Systems API server’s physical IP address as you would for " -"a non-HA environment." -msgstr "" -"OpenStack サービスは、通常の非高可用性環境のように、Shared File Systems API " -"サーバーの物理 IP アドレスを指定する代わりに、Shared File Systems API の設定" -"が高可用性と仮想クラスター IP アドレスを指し示す必要があります。" - -msgid "[TODO (Add Telemetry overview)]" -msgstr "[TODO (Add Telemetry overview)]" - -msgid "[TODO -- write intro to this section]" -msgstr "[TODO -- write intro to this section]" - -msgid "" -"[TODO Need description of VIP failover inside Linux namespaces and expected " -"SLA.]" -msgstr "" -"[TODO Need description of VIP failover inside Linux namespaces and expected " -"SLA.]" - -msgid "" -"[TODO Need discussion of network hardware, bonding interfaces, intelligent " -"Layer 2 switches, routers and Layer 3 switches.]" -msgstr "" -"[TODO Need discussion of network hardware, bonding interfaces, intelligent " -"Layer 2 switches, routers and Layer 3 switches.]" - -msgid "" -"[TODO: Verify that Oslo supports hash synchronization; if so, this should " -"not take more than load balancing.]" -msgstr "" -"[TODO: Verify that Oslo supports hash synchronization; if so, this should " -"not take more than load balancing.]" - -msgid "" -"[TODO: Add discussion of remote backup facilities as an alternate way to " -"secure ones data. Include brief mention of key third-party technologies with " -"links to their documentation]" -msgstr "" -"[TODO: Add discussion of remote backup facilities as an alternate way to " -"secure ones data. Include brief mention of key third-party technologies with " -"links to their documentation]" - -msgid "" -"[TODO: Does this list need to be updated? 
Perhaps we need a table that shows " -"each component and the earliest release that allows it to work with HA " -"queues.]" -msgstr "" -"[TODO: Does this list need to be updated? Perhaps we need a table that shows " -"each component and the earliest release that allows it to work with HA " -"queues.]" - -msgid "" -"[TODO: Provide a minimal architecture example for HA, expanded on that given " -"in http://docs.openstack.org/liberty/install-guide-ubuntu/environment.html " -"for easy comparison]" -msgstr "" -"[TODO: Provide a minimal architecture example for HA, expanded on that given " -"in http://docs.openstack.org/liberty/install-guide-ubuntu/environment.html " -"for easy comparison]" - -msgid "[TODO: Should the example instead use a minimum of three nodes?]" -msgstr "[TODO: Should the example instead use a minimum of three nodes?]" - -msgid "" -"[TODO: Should the main example now use corosync-cmapctl and have the note " -"give the command for Corosync version 1?]" -msgstr "" -"[TODO: Should the main example now use corosync-cmapctl and have the note " -"give the command for Corosync version 1?]" - -msgid "[TODO: Should this show three hosts?]" -msgstr "[TODO: Should this show three hosts?]" - -msgid "" -"[TODO: This hands off to two different docs for install information. We " -"should choose one or explain the specific purpose of each.]" -msgstr "" -"[TODO: This hands off to two different docs for install information. We " -"should choose one or explain the specific purpose of each.]" - -msgid "" -"[TODO: This section should begin with a brief mention about what HA queues " -"are and why they are valuable, etc]" -msgstr "" -"[TODO: This section should begin with a brief mention about what HA queues " -"are and why they are valuable, etc]" - -msgid "" -"[TODO: Update this information. Can this service now be made HA in active/" -"active mode or do we need to pull in the instructions to run this service in " -"active/passive mode?]" -msgstr "" -"[TODO: Update this information. Can this service now be made HA in active/" -"active mode or do we need to pull in the instructions to run this service in " -"active/passive mode?]" - -msgid "" -"[TODO: Verify that the active/passive network configuration information from " -"``_ should not be included here." -msgstr "" -"[TODO: Verify that the active/passive network configuration information from " -"``_ should not be included here." - -msgid "[TODO: Verify that these numbers are good]" -msgstr "[TODO: Verify that these numbers are good]" - -msgid "[TODO: need more discussion of these parameters]" -msgstr "[TODO: need more discussion of these parameters]" - -msgid "[TODO: replace \"currently\" with specific release names]" -msgstr "[TODO: replace \"currently\" with specific release names]" - -msgid "[TODO: update this section.]" -msgstr "[TODO: update this section.]" - -msgid "" -"[TODO: we need more commentary about the contents and format of this file]" -msgstr "" -"[TODO: we need more commentary about the contents and format of this file]" - -msgid "[Verify fingerprint of imported GPG key; see below]" -msgstr "[Verify fingerprint of imported GPG key; see below]" - -msgid "" -"`CentOS and RHEL `_" -msgstr "" -"`CentOS および RHEL `_" - -msgid "" -"`Ceph RBD `_ is an innately high availability storage back " -"end. It creates a storage cluster with multiple nodes that communicate with " -"each other to replicate and redistribute data dynamically. 
A Ceph RBD " -"storage cluster provides a single shared set of storage nodes that can " -"handle all classes of persistent and ephemeral data -- glance, cinder, and " -"nova -- that are required for OpenStack instances." -msgstr "" -"`Ceph RBD `_ は、本質的に高可用性なストレージバックエンドで" -"す。複数のノードを用いてストレージクラスターを作成し、お互いに通信して動的に" -"レプリケーションとデータ再配布を実行します。Ceph RBD ストレージクラスターは、" -"OpenStack インスタンスに必要となる、すべての種類の永続データと一時データ " -"(glance、cinder、nova) を取り扱える、単一の共有ストレージノードを提供します。" - -msgid "`Clustering Guide `_" -msgstr "`Clustering Guide `_" - -msgid "`Debian and Ubuntu `_" -msgstr "`Debian および Ubuntu `_" - -msgid "" -"`Galera Cluster for MySQL `_ The MySQL reference " -"implementation from Codership, Oy;" -msgstr "" -"`Galera Cluster for MySQL `_ Codership, Oy による " -"MySQL リファレンス実装" - -msgid "`Highly Available Queues `_" -msgstr "`Highly Available Queues `_" - -msgid "" -"`LP1328922 ` and " -"`LP1349398 ` are " -"related.]" -msgstr "" -"`LP1328922 ` and " -"`LP1349398 ` are " -"related.]" - -msgid "" -"`MariaDB Galera Cluster `_ The MariaDB implementation " -"of Galera Cluster, which is commonly supported in environments based on Red " -"Hat distributions;" -msgstr "" -"`MariaDB Galera Cluster `_ Galera Cluster の MariaDB 実" -"装、一般的に Red Hat 系ディストリビューションの環境においてサポートされます" - -msgid "`Memcached `__." -msgstr "`Memcached `__." - -msgid "" -"`Pacemaker `_ cluster stack is the state-of-the-art " -"high availability and load balancing stack for the Linux platform. Pacemaker " -"is useful to make OpenStack infrastructure highly available. Also, it is " -"storage and application-agnostic, and in no way specific to OpenStack." -msgstr "" -"`Pacemaker `_ クラスタースタックは、Linux プラット" -"フォーム向けの最高水準の高可用性と負荷分散を実現します。Pacemaker は " -"OpenStack インフラを高可用化するために役立ちます。また、ストレージとアプリ" -"ケーションから独立していて、OpenStack 特有の方法はありません。" - -msgid "" -"`Percona XtraDB Cluster `_ The XtraDB " -"implementation of Galera Cluster from Percona." -msgstr "" -"`Percona XtraDB Cluster `_ Percona による Galera " -"Cluster の XtraDB 実装" - -msgid "" -"`Provider networks `_" -msgstr "" -"`プロバイダーネットワーク `_" - -msgid "" -"`RPM based `_ (RHEL, Fedora, " -"CentOS, openSUSE)" -msgstr "" -"`RPM ベース `_ (RHEL, Fedora, " -"CentOS, openSUSE)" - -msgid "`Redis `__." -msgstr "`Redis `__。" - -msgid "" -"`Self-service networks `_" -msgstr "" -"`セルフサービスネットワーク `_" - -msgid "" -"`Ubuntu `_" -msgstr "" -"`Ubuntu `_" - -msgid "`Zookeeper `__." -msgstr "`Zookeeper `__。" - -msgid "``/etc/iptables/iptables.rules``" -msgstr "``/etc/iptables/iptables.rules``" - -msgid "``/etc/sysconfig/iptables``" -msgstr "``/etc/sysconfig/iptables``" - -msgid "``0xcbcb082a1bb943db``" -msgstr "``0xcbcb082a1bb943db``" - -msgid "``1C4CBDCDCD2EFD2A``" -msgstr "``1C4CBDCDCD2EFD2A``" - -msgid "``BC19DDBA``" -msgstr "``BC19DDBA``" - -msgid "``crmsh``" -msgstr "``crmsh``" - -msgid "``firewall-cmd``" -msgstr "``firewall-cmd``" - -msgid "``iptables``" -msgstr "``iptables``" - -msgid "" -"``last_man_standing_window`` specifies the time, in milliseconds, required " -"to recalculate quorum after one or more hosts have been lost from the " -"cluster. To do the new quorum recalculation, the cluster must have quorum " -"for at least the interval specified for ``last_man_standing_window``; the " -"default is 10000ms." -msgstr "" -"``last_man_standing_window`` は、1 つ以上のホストがクラスターから失われた後、" -"クォーラムを再計算するために必要となる時間をミリ秒単位で指定します。新しく" -"クォーラムを再計算するために、クラスターは少なくとも " -"``last_man_standing_window`` に指定された間隔はクォーラムを保持する必要があり" -"ます。デフォルトは 10000ms です。" - -msgid "" -"``nodeid`` is optional when using IPv4 and required when using IPv6. 
This is " -"a 32-bit value specifying the node identifier delivered to the cluster " -"membership service. If this is not specified with IPv4, the node id is " -"determined from the 32-bit IP address of the system to which the system is " -"bound with ring identifier of 0. The node identifier value of zero is " -"reserved and should not be used." -msgstr "" -"``nodeid`` は、IPv4 を使用するときにオプション、IPv6 を使用するときに必須で" -"す。クラスターメンバーシップサービスに配信される、ノード識別子を指定する 32 " -"ビットの値です。IPv4 で指定されていない場合、ノード ID は、システムがリング識" -"別子 0 に割り当てた 32 ビットの IP アドレスになります。ノード識別子の値 0 " -"は、予約済みであり、使用してはいけません。" - -msgid "``pcs``" -msgstr "``pcs``" - -msgid "" -"``ring{X}_addr`` specifies the IP address of one of the nodes. {X} is the " -"ring number." -msgstr "" -"``ring{X}_addr`` は、1 つのノードの IP アドレスを指定します。{X} はリングの番" -"号です。" - -msgid "" -"`openSUSE and SUSE Linux Enterprise Server `_" -msgstr "" -"`openSUSE、SUSE Linux Enterprise Server `_" - -msgid "allow_automatic_l3agent_failover" -msgstr "allow_automatic_l3agent_failover" - -msgid "compute node" -msgstr "コンピュートノード" - -msgid "controller node" -msgstr "コントローラーノード" - -msgid "corosync" -msgstr "corosync" - -msgid "fence-agents (CentOS or RHEL) or cluster-glue" -msgstr "fence-agents (CentOS、RHEL) または cluster-glue" - -msgid "http://lists.openstack.org/pipermail/openstack-dev/2014-May/035264.html" -msgstr "" -"http://lists.openstack.org/pipermail/openstack-dev/2014-May/035264.html" - -msgid "http://www.joinfu.com/" -msgstr "http://www.joinfu.com/" - -msgid "l3_ha" -msgstr "l3_ha" - -msgid "libqb0" -msgstr "libqb0" - -msgid "max_l3_agents_per_router" -msgstr "max_l3_agents_per_router" - -msgid "min_l3_agents_per_router" -msgstr "min_l3_agents_per_router" - -msgid "openSUSE" -msgstr "openSUSE" - -msgid "pacemaker" -msgstr "pacemaker" - -msgid "pcs (CentOS or RHEL) or crmsh" -msgstr "pcs (CentOS、RHEL) または crmsh" - -msgid "resource-agents" -msgstr "resource-agents" diff --git a/doc/ha-guide/source/networking-ha-dhcp.rst b/doc/ha-guide/source/networking-ha-dhcp.rst deleted file mode 100644 index ad37dab1..00000000 --- a/doc/ha-guide/source/networking-ha-dhcp.rst +++ /dev/null @@ -1,17 +0,0 @@ - -.. _dhcp-agent: - -====================== -Run neutron DHCP agent -====================== - -The OpenStack Networking service has a scheduler -that lets you run multiple agents across nodes; -the DHCP agent can be natively highly available. -To configure the number of DHCP agents per network, -modify the ``dhcp_agents_per_network`` parameter -in the :file:`/etc/neutron/neutron.conf` file. -By default this is set to 1. -To achieve high availability, -assign more than one DHCP agent per network. - diff --git a/doc/ha-guide/source/networking-ha-l3.rst b/doc/ha-guide/source/networking-ha-l3.rst deleted file mode 100644 index 511e25cf..00000000 --- a/doc/ha-guide/source/networking-ha-l3.rst +++ /dev/null @@ -1,37 +0,0 @@ - -.. _neutron-l3: - -==================== -Run neutron L3 agent -==================== - -The neutron L3 agent is scalable, due to the scheduler that supports -Virtual Router Redundancy Protocol (VRRP) -to distribute virtual routers across multiple nodes. -To enable high availability for configured routers, -edit the :file:`/etc/neutron/neutron.conf` file -to set the following values: - -.. list-table:: /etc/neutron/neutron.conf parameters for high availability - :widths: 15 10 30 - :header-rows: 1 - - * - Parameter - - Value - - Description - * - l3_ha - - True - - All routers are highly available by default. 
-   * - allow_automatic_l3agent_failover
-     - True
-     - Set automatic L3 agent failover for routers
-   * - max_l3_agents_per_router
-     - 2 or more
-     - Maximum number of network nodes to use for the HA router.
-   * - min_l3_agents_per_router
-     - 2 or more
-     - Minimum number of network nodes to use for the HA router.
-       A new router can be created only if this number
-       of network nodes are available.
-
-
diff --git a/doc/ha-guide/source/networking-ha-lbaas.rst b/doc/ha-guide/source/networking-ha-lbaas.rst
deleted file mode 100644
index e0a6a237..00000000
--- a/doc/ha-guide/source/networking-ha-lbaas.rst
+++ /dev/null
@@ -1,17 +0,0 @@
-
-.. _neutron-lbaas:
-
-=======================
-Run neutron LBaaS agent
-=======================
-
-Currently, no native feature is provided
-to make the LBaaS agent highly available
-using the default plug-in HAProxy.
-A common way to make HAProxy highly available
-is to use VRRP (Virtual Router Redundancy Protocol).
-Unfortunately, this is not yet implemented
-in the LBaaS HAProxy plug-in.
-
-[TODO: update this section.]
-
diff --git a/doc/ha-guide/source/networking-ha-metadata.rst b/doc/ha-guide/source/networking-ha-metadata.rst
deleted file mode 100644
index fa4f10d4..00000000
--- a/doc/ha-guide/source/networking-ha-metadata.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-
-.. _neutron-metadata:
-
-==========================
-Run neutron metadata agent
-==========================
-
-No native feature is available
-to make this service highly available.
-At this time, the Active/Passive solution exists
-to run the neutron metadata agent
-in failover mode with Pacemaker.
-
-[TODO: Update this information.
-Can this service now be made HA in active/active mode
-or do we need to pull in the instructions
-to run this service in active/passive mode?]
-
diff --git a/doc/ha-guide/source/networking-ha.rst b/doc/ha-guide/source/networking-ha.rst
deleted file mode 100644
index b53d9432..00000000
--- a/doc/ha-guide/source/networking-ha.rst
+++ /dev/null
@@ -1,60 +0,0 @@
-
-=======================
-OpenStack network nodes
-=======================
-
-Configure networking on each node.
-The
-`Networking `_
-section of the *Install Guide* includes basic information
-about configuring networking.
-
-Notes from planning outline:
-
-- Rather than configuring neutron here,
-  we should simply mention physical network HA methods
-  such as bonding and additional node/network requirements
-  for L3HA and DVR for planning purposes.
-- Neutron agents should be described for active/active;
-  deprecate the single-agent-instance case.
-- For Kilo and beyond, focus on L3HA and DVR.
-- Link to `Networking Guide `_
-  for configuration details.
-
-[TODO: Verify that the active/passive
-network configuration information from
-``_
-should not be included here.
-
-`LP1328922 `_
-and
-`LP1349398 `_
-are related.]
-
-OpenStack network nodes contain:
-
-- :ref:`Neutron DHCP agent`
-- Neutron L2 agent.
-  Note that the L2 agent cannot be distributed and highly available.
-  Instead, it must be installed on each data forwarding node
-  to control the virtual network drivers
-  such as Open vSwitch or Linux Bridge.
-  One L2 agent runs per node and controls its virtual interfaces.
-- :ref:`Neutron L3 agent`
-- :ref:`Neutron metadata agent`
-- :ref:`Neutron LBaaS` (Load Balancing as a Service) agent
-
-.. note::
-
-   For Liberty, we do not have standalone network nodes in general.
-   We usually run the Networking services on the controller nodes.
-   In this guide, we use the term "network nodes" for convenience.
-
-.. toctree::
-   :maxdepth: 2
-
-   networking-ha-dhcp.rst
-   networking-ha-l3.rst
-   networking-ha-metadata.rst
-   networking-ha-lbaas.rst
-
diff --git a/doc/ha-guide/source/noncore-ha.rst b/doc/ha-guide/source/noncore-ha.rst
deleted file mode 100644
index 93675e8d..00000000
--- a/doc/ha-guide/source/noncore-ha.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-
-=====================================================
-Configuring non-core components for high availability
-=====================================================
diff --git a/doc/ha-guide/source/storage-ha-backend.rst b/doc/ha-guide/source/storage-ha-backend.rst
deleted file mode 100644
index a6d1deae..00000000
--- a/doc/ha-guide/source/storage-ha-backend.rst
+++ /dev/null
@@ -1,85 +0,0 @@
-
-.. _storage-ha-backend:
-
-================
-Storage back end
-================
-
-Most of this guide concerns the control plane of high availability:
-ensuring that services continue to run even if a component fails.
-Ensuring that data is not lost
-is the data plane component of high availability;
-this is discussed here.
-
-An OpenStack environment includes multiple data pools for the VMs:
-
-- Ephemeral storage is allocated for an instance
-  and is deleted when the instance is deleted.
-  The Compute service manages ephemeral storage.
-  By default, Compute stores ephemeral drives as files
-  on local disks on the Compute node
-  but Ceph RBD can instead be used
-  as the storage back end for ephemeral storage.
-
-- Persistent storage exists outside all instances.
-  Two types of persistent storage are provided:
-
-  - Block Storage service (cinder)
-    can use LVM or Ceph RBD as the storage back end.
-  - Image service (glance)
-    can use the Object Storage service (swift)
-    or Ceph RBD as the storage back end.
-
-For more information about configuring storage back ends for
-the different storage options, see the `Administrator Guide
-`_.
-
-This section discusses ways to protect against
-data loss in your OpenStack environment.
-
-RAID drives
------------
-
-Configuring RAID on the hard drives that implement storage
-protects your data against a hard drive failure.
-If, however, the node itself fails, data may be lost.
-In particular, all volumes stored on an LVM node can be lost.
-
-Ceph
-----
-
-`Ceph RBD `_
-is an innately high-availability storage back end.
-It creates a storage cluster with multiple nodes
-that communicate with each other
-to replicate and redistribute data dynamically.
-A Ceph RBD storage cluster provides
-a single shared set of storage nodes
-that can handle all classes of persistent and ephemeral data
--- glance, cinder, and nova --
-that are required for OpenStack instances.
-
-Ceph RBD provides object replication capabilities
-by storing Block Storage volumes as Ceph RBD objects;
-Ceph RBD ensures that each replica of an object
-is stored on a different node.
-This means that your volumes are protected against
-hard drive and node failures
-or even the failure of the data center itself.
-
-When Ceph RBD is used for ephemeral volumes
-as well as block and image storage, it supports
-`live migration
-`_
-of VMs with ephemeral drives;
-LVM only supports live migration of volume-backed VMs.
-
-Remote backup facilities
-------------------------
-
-[TODO: Add discussion of remote backup facilities
-as an alternate way to secure one's data.
-Include brief mention of key third-party technologies
-with links to their documentation]
-
-
diff --git a/doc/ha-guide/source/storage-ha-cinder.rst b/doc/ha-guide/source/storage-ha-cinder.rst
deleted file mode 100644
index 2168b47a..00000000
--- a/doc/ha-guide/source/storage-ha-cinder.rst
+++ /dev/null
@@ -1,238 +0,0 @@
-.. highlight:: ini
-   :linenothreshold: 5
-
-==================================
-Highly available Block Storage API
-==================================
-
-Cinder provides 'block storage as a service' suitable for
-performance-sensitive scenarios such as databases, expandable file
-systems, or providing a server with access to raw block-level storage.
-
-Persistent block storage can survive instance termination and can also
-be moved across instances like any external storage device. Cinder
-also provides volume snapshot capability for backing up volumes.
-
-Making this Block Storage API service highly available in
-active/passive mode involves:
-
-- :ref:`ha-cinder-pacemaker`
-- :ref:`ha-cinder-configure`
-- :ref:`ha-cinder-services`
-
-In theory, you can run the Block Storage service as active/active.
-However, because of outstanding concerns, it is recommended to run
-the volume component as active/passive only.
-
-Jon Bernard writes:
-
-::
-
-    Requests are first seen by Cinder in the API service, and we have a
-    fundamental problem there - a standard test-and-set race condition
-    exists for many operations where the volume status is first checked
-    for an expected status and then (in a different operation) updated to
-    a pending status. The pending status indicates to other incoming
-    requests that the volume is undergoing a current operation, however it
-    is possible for two simultaneous requests to race here, which yields
-    undefined results.
-
-    Later, the manager/driver will receive the message and carry out the
-    operation. At this stage there is a question of the synchronization
-    techniques employed by the drivers and what guarantees they make.
-
-    If cinder-volume processes exist as different processes, then the
-    'synchronized' decorator from the lockutils package will not be
-    sufficient. In this case the programmer can pass an argument to
-    synchronized() 'external=True'. If external is enabled, then the
-    locking will take place on a file located on the filesystem. By
-    default, this file is placed in Cinder's 'state directory' in
-    /var/lib/cinder so won't be visible to cinder-volume instances running
-    on different machines.
-
-    However, the location for file locking is configurable. So an
-    operator could configure the state directory to reside on shared
-    storage. If the shared storage in use implements unix file locking
-    semantics, then this could provide the requisite synchronization
-    needed for an active/active HA configuration.
-
-    The remaining issue is that not all drivers use the synchronization
-    methods, and even fewer of those use the external file locks. A
-    sub-concern would be whether they use them correctly.
-
-You can read more about these concerns on the
-`Red Hat Bugzilla `_
-and there is a
-`pseudo roadmap `_
-for addressing them upstream.
-
-
-.. _ha-cinder-pacemaker:
-
-Add Block Storage API resource to Pacemaker
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-On RHEL-based systems, you should create resources for cinder's
-systemd agents and create constraints to enforce startup/shutdown
-ordering:
-
-.. code-block:: console
-
-   pcs resource create openstack-cinder-api systemd:openstack-cinder-api --clone interleave=true
-   pcs resource create openstack-cinder-scheduler systemd:openstack-cinder-scheduler --clone interleave=true
-   pcs resource create openstack-cinder-volume systemd:openstack-cinder-volume
-
-   pcs constraint order start openstack-cinder-api-clone then openstack-cinder-scheduler-clone
-   pcs constraint colocation add openstack-cinder-scheduler-clone with openstack-cinder-api-clone
-   pcs constraint order start openstack-cinder-scheduler-clone then openstack-cinder-volume
-   pcs constraint colocation add openstack-cinder-volume with openstack-cinder-scheduler-clone
-
-
-If the Block Storage service runs on the same nodes as the other services,
-then it is advisable to also include:
-
-.. code-block:: console
-
-   pcs constraint order start openstack-keystone-clone then openstack-cinder-api-clone
-
-Alternatively, instead of using systemd agents, download and
-install the OCF resource agent:
-
-.. code-block:: console
-
-   # cd /usr/lib/ocf/resource.d/openstack
-   # wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/cinder-api
-   # chmod a+rx *
-
-You can now add the Pacemaker configuration for the Block Storage API resource.
-Connect to the Pacemaker cluster with the :command:`crm configure` command
-and add the following cluster resources:
-
-::
-
-   primitive p_cinder-api ocf:openstack:cinder-api \
-     params config="/etc/cinder/cinder.conf" \
-     os_password="secretsecret" \
-     os_username="admin" \
-     os_tenant_name="admin" \
-     keystone_get_token_url="http://10.0.0.11:5000/v2.0/tokens" \
-     op monitor interval="30s" timeout="30s"
-
-This configuration creates ``p_cinder-api``,
-a resource for managing the Block Storage API service.
-
-The command :command:`crm configure` supports batch input,
-so you may copy and paste the lines above
-into your live Pacemaker configuration and then make changes as required.
-For example, you may enter ``edit p_ip_cinder-api``
-from the :command:`crm configure` menu
-and edit the resource to match your preferred virtual IP address.
-
-Once completed, commit your configuration changes
-by entering :command:`commit` from the :command:`crm configure` menu.
-Pacemaker then starts the Block Storage API service
-and its dependent resources on one of your nodes.
-
-.. _ha-cinder-configure:
-
-Configure Block Storage API service
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Edit the ``/etc/cinder/cinder.conf`` file.
-
-On a RHEL-based system, it should look something like:
-
-.. 
code-block:: ini - :linenos: - - [DEFAULT] - # This is the name which we should advertise ourselves as and for - # A/P installations it should be the same everywhere - host = cinder-cluster-1 - - # Listen on the Block Storage VIP - osapi_volume_listen = 10.0.0.11 - - auth_strategy = keystone - control_exchange = cinder - - volume_driver = cinder.volume.drivers.nfs.NfsDriver - nfs_shares_config = /etc/cinder/nfs_exports - nfs_sparsed_volumes = true - nfs_mount_options = v3 - - [database] - sql_connection = mysql://cinder:CINDER_DBPASS@10.0.0.11/cinder - max_retries = -1 - - [keystone_authtoken] - # 10.0.0.11 is the Keystone VIP - identity_uri = http://10.0.0.11:35357/ - auth_uri = http://10.0.0.11:5000/ - admin_tenant_name = service - admin_user = cinder - admin_password = CINDER_PASS - - [oslo_messaging_rabbit] - # Explicitly list the rabbit hosts as it doesn't play well with HAProxy - rabbit_hosts = 10.0.0.12,10.0.0.13,10.0.0.14 - # As a consequence, we also need HA queues - rabbit_ha_queues = True - heartbeat_timeout_threshold = 60 - heartbeat_rate = 2 - -Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage -database. Replace ``CINDER_PASS`` with the password you chose for the -``cinder`` user in the Identity service. - -This example assumes that you are using NFS for the physical storage, which -will almost never be true in a production installation. - -If you are using the Block Storage service OCF agent, some settings will -be filled in for you, resulting in a shorter configuration file: - -.. code-block:: ini - :linenos: - - # We have to use MySQL connection to store data: - sql_connection = mysql://cinder:CINDER_DBPASS@10.0.0.11/cinder - # Alternatively, you can switch to pymysql, - # a new Python 3 compatible library and use - # sql_connection = mysql+pymysql://cinder:CINDER_DBPASS@10.0.0.11/cinder - # and be ready when everything moves to Python 3. - # Ref: https://wiki.openstack.org/wiki/PyMySQL_evaluation - - # We bind Block Storage API to the VIP: - osapi_volume_listen = 10.0.0.11 - - # We send notifications to High Available RabbitMQ: - notifier_strategy = rabbit - rabbit_host = 10.0.0.11 - -Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage -database. - -.. _ha-cinder-services: - -Configure OpenStack services to use highly available Block Storage API -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Your OpenStack services must now point their -Block Storage API configuration to the highly available, -virtual cluster IP address -rather than a Block Storage API server’s physical IP address -as you would for a non-HA environment. - -You must create the Block Storage API endpoint with this IP. - -If you are using both private and public IP addresses, -you should create two virtual IPs and define your endpoint like this: - -.. 
code-block:: console - - $ keystone endpoint-create --region $KEYSTONE_REGION \ - --service-id $service-id \ - --publicurl 'http://PUBLIC_VIP:8776/v1/%(tenant_id)s' \ - --adminurl 'http://10.0.0.11:8776/v1/%(tenant_id)s' \ - --internalurl 'http://10.0.0.11:8776/v1/%(tenant_id)s' - diff --git a/doc/ha-guide/source/storage-ha-glance.rst b/doc/ha-guide/source/storage-ha-glance.rst deleted file mode 100644 index 5afb211a..00000000 --- a/doc/ha-guide/source/storage-ha-glance.rst +++ /dev/null @@ -1,130 +0,0 @@ -==================================== -Highly available OpenStack Image API -==================================== - -The OpenStack Image service offers a service for discovering, -registering, and retrieving virtual machine images. -To make the OpenStack Image API service highly available -in active / passive mode, you must: - -- :ref:`glance-api-pacemaker` -- :ref:`glance-api-configure` -- :ref:`glance-services` - -This section assumes that you are familiar with the -`documentation -`_ -for installing the OpenStack Image API service. - -.. _glance-api-pacemaker: - -Add OpenStack Image API resource to Pacemaker -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You must first download the resource agent to your system: - -.. code-block:: console - - # cd /usr/lib/ocf/resource.d/openstack - # wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/glance-api - # chmod a+rx * - -You can now add the Pacemaker configuration -for the OpenStack Image API resource. -Use the :command:`crm configure` command -to connect to the Pacemaker cluster -and add the following cluster resources: - -:: - - primitive p_glance-api ocf:openstack:glance-api \ - params config="/etc/glance/glance-api.conf" \ - os_password="secretsecret" \ - os_username="admin" os_tenant_name="admin" \ - os_auth_url="http://10.0.0.11:5000/v2.0/" \ - op monitor interval="30s" timeout="30s" - -This configuration creates ``p_glance-api``, -a resource for managing the OpenStack Image API service. - -The :command:`crm configure` command supports batch input, -so you may copy and paste the above into your live Pacemaker configuration -and then make changes as required. -For example, you may enter edit ``p_ip_glance-api`` -from the :command:`crm configure` menu -and edit the resource to match your preferred virtual IP address. - -After completing these steps, -commit your configuration changes by entering :command:`commit` -from the :command:`crm configure` menu. -Pacemaker then starts the OpenStack Image API service -and its dependent resources on one of your nodes. - -.. _glance-api-configure: - -Configure OpenStack Image service API -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Edit the :file:`/etc/glance/glance-api.conf` file -to configure the OpenStack image service: - -.. code-block:: ini - - # We have to use MySQL connection to store data: - sql_connection=mysql://glance:password@10.0.0.11/glance - # Alternatively, you can switch to pymysql, - # a new Python 3 compatible library and use - # sql_connection=mysql+pymysql://glance:password@10.0.0.11/glance - # and be ready when everything moves to Python 3. - # Ref: https://wiki.openstack.org/wiki/PyMySQL_evaluation - - # We bind OpenStack Image API to the VIP: - bind_host = 10.0.0.11 - - # Connect to OpenStack Image registry service: - registry_host = 10.0.0.11 - - # We send notifications to High Available RabbitMQ: - notifier_strategy = rabbit - rabbit_host = 10.0.0.11 - -[TODO: need more discussion of these parameters] - -.. 
_glance-services:
-
-Configure OpenStack services to use highly available OpenStack Image API
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Your OpenStack services must now point
-their OpenStack Image API configuration to the highly available,
-virtual cluster IP address
-instead of pointing to the physical IP address
-of an OpenStack Image API server
-as you would in a non-HA cluster.
-
-For OpenStack Compute, for example,
-if your OpenStack Image API service IP address is 10.0.0.11
-(as in the configuration explained here),
-you would use the following configuration in your :file:`nova.conf` file:
-
-.. code-block:: ini
-
-   [glance]
-   ...
-   api_servers = 10.0.0.11
-   ...
-
-
-You must also create the OpenStack Image API endpoint with this IP address.
-If you are using both private and public IP addresses,
-you should create two virtual IP addresses
-and define your endpoint like this:
-
-.. code-block:: console
-
-   $ keystone endpoint-create --region $KEYSTONE_REGION \
-     --service-id $service-id --publicurl 'http://PUBLIC_VIP:9292' \
-     --adminurl 'http://10.0.0.11:9292' \
-     --internalurl 'http://10.0.0.11:9292'
-
-
diff --git a/doc/ha-guide/source/storage-ha-manila.rst b/doc/ha-guide/source/storage-ha-manila.rst
deleted file mode 100644
index a07e07c2..00000000
--- a/doc/ha-guide/source/storage-ha-manila.rst
+++ /dev/null
@@ -1,101 +0,0 @@
-.. highlight:: ini
-   :linenothreshold: 5
-
-========================================
-Highly available Shared File Systems API
-========================================
-
-Making the Shared File Systems (manila) API service highly available
-in active/passive mode involves:
-
-- :ref:`ha-manila-pacemaker`
-- :ref:`ha-manila-configure`
-- :ref:`ha-manila-services`
-
-.. _ha-manila-pacemaker:
-
-Add Shared File Systems API resource to Pacemaker
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You must first download the resource agent to your system:
-
-.. code-block:: console
-
-   # cd /usr/lib/ocf/resource.d/openstack
-   # wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/manila-api
-   # chmod a+rx *
-
-You can now add the Pacemaker configuration for the Shared File Systems
-API resource. Connect to the Pacemaker cluster with the
-:command:`crm configure` command and add the following cluster resources:
-
-::
-
-   primitive p_manila-api ocf:openstack:manila-api \
-     params config="/etc/manila/manila.conf" \
-     os_password="secretsecret" \
-     os_username="admin" \
-     os_tenant_name="admin" \
-     keystone_get_token_url="http://10.0.0.11:5000/v2.0/tokens" \
-     op monitor interval="30s" timeout="30s"
-
-This configuration creates ``p_manila-api``, a resource for managing the
-Shared File Systems API service.
-
-The :command:`crm configure` command supports batch input, so you may copy and
-paste the lines above into your live Pacemaker configuration and then make
-changes as required. For example, you may enter ``edit p_ip_manila-api`` from
-the :command:`crm configure` menu and edit the resource to match your preferred
-virtual IP address.
-
-Once completed, commit your configuration changes by entering :command:`commit`
-from the :command:`crm configure` menu. Pacemaker then starts the
-Shared File Systems API service and its dependent resources on one of your
-nodes.
-
-.. _ha-manila-configure:
-
-Configure Shared File Systems API service
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Edit the :file:`/etc/manila/manila.conf` file:
-
-.. code-block:: ini
-   :linenos:
-
-   # We have to use MySQL connection to store data:
-   sql_connection = mysql+pymysql://manila:password@10.0.0.11/manila?charset=utf8
-
-   # We bind Shared File Systems API to the VIP:
-   osapi_share_listen = 10.0.0.11
-
-   # We send notifications to highly available RabbitMQ:
-   notifier_strategy = rabbit
-   rabbit_host = 10.0.0.11
-
-
-.. _ha-manila-services:
-
-Configure OpenStack services to use HA Shared File Systems API
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Your OpenStack services must now point their Shared File Systems API
-configuration to the highly available, virtual cluster IP address rather than
-a Shared File Systems API server's physical IP address as you would
-for a non-HA environment.
-
-You must create the Shared File Systems API endpoint with this IP.
-
-If you are using both private and public IP addresses, you should create two
-virtual IPs and define your endpoints like this:
-
-.. code-block:: console
-
-   $ openstack endpoint create --region RegionOne \
-     sharev2 public 'http://PUBLIC_VIP:8786/v2/%(tenant_id)s'
-
-   $ openstack endpoint create --region RegionOne \
-     sharev2 internal 'http://10.0.0.11:8786/v2/%(tenant_id)s'
-
-   $ openstack endpoint create --region RegionOne \
-     sharev2 admin 'http://10.0.0.11:8786/v2/%(tenant_id)s'
diff --git a/doc/ha-guide/source/storage-ha.rst b/doc/ha-guide/source/storage-ha.rst
deleted file mode 100644
index c853277c..00000000
--- a/doc/ha-guide/source/storage-ha.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-=========================================
-Configuring Storage for high availability
-=========================================
-
-.. toctree::
-   :maxdepth: 2
-
-   storage-ha-cinder.rst
-   storage-ha-glance.rst
-   storage-ha-manila.rst
-   storage-ha-backend.rst
-
-
diff --git a/other-requirements.txt b/other-requirements.txt
deleted file mode 100644
index 13b92303..00000000
--- a/other-requirements.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-# This is a cross-platform list tracking distribution packages needed by tests;
-# see http://docs.openstack.org/infra/bindep/ for additional information.
-
-gettext
-libxml2-dev [platform:dpkg]
-libxml2-devel [platform:rpm]
-libxml2-utils [platform:dpkg]
-libxslt-devel [platform:rpm]
-libxslt1-dev [platform:dpkg]
-python-dev [platform:dpkg]
-python-lxml
-zlib-devel [platform:rpm]
-zlib1g-dev [platform:dpkg]
diff --git a/test-requirements.txt b/test-requirements.txt
deleted file mode 100644
index 0431aced..00000000
--- a/test-requirements.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-
-openstack-doc-tools>=0.31
-
-sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
-openstackdocstheme>=1.2.3
-doc8 # Apache-2.0
diff --git a/tools/build-all-rst.sh b/tools/build-all-rst.sh
deleted file mode 100755
index 457641a6..00000000
--- a/tools/build-all-rst.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash -e
-
-mkdir -p publish-docs
-
-doc-tools-build-rst doc/ha-guide --build build \
-    --target ha-guide
diff --git a/tools/generatepot-rst.sh b/tools/generatepot-rst.sh
deleted file mode 100755
index 319805f9..00000000
--- a/tools/generatepot-rst.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash -xe
-
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-DOCNAME=$1
-
-if [ -z "$DOCNAME" ] ; then
-    echo "usage: $0 DOCNAME"
-    exit 1
-fi
-
-# We're not doing anything for this directory.
-if [[ "$DOCNAME" = "common" ]] ; then
-    exit 0
-fi
-
-rm -f doc/$DOCNAME/source/locale/$DOCNAME.pot
-sphinx-build -b gettext doc/$DOCNAME/source/ doc/$DOCNAME/source/locale/
-
-# common is translated as part of openstack-manuals, do not
-# include the file in the combined tree.
-rm doc/$DOCNAME/source/locale/common.pot
-
-# Take care of deleting all temporary files so that git add
-# doc/$DOCNAME/source/locale will only add the single pot file.
-# Remove UUIDs, those are not necessary and change too often
-msgcat --sort-by-file doc/$DOCNAME/source/locale/*.pot | \
-    awk '$0 !~ /^\# [a-z0-9]+$/' > doc/$DOCNAME/source/$DOCNAME.pot
-rm doc/$DOCNAME/source/locale/*.pot
-rm -rf doc/$DOCNAME/source/locale/.doctrees/
-mv doc/$DOCNAME/source/$DOCNAME.pot doc/$DOCNAME/source/locale/$DOCNAME.pot
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index e9ef5340..00000000
--- a/tox.ini
+++ /dev/null
@@ -1,76 +0,0 @@
-[tox]
-minversion = 1.6
-envlist = checkniceness,checkbuild,checklang
-skipsdist = True
-
-[testenv]
-basepython = python2
-setenv =
-   VIRTUAL_ENV={envdir}
-deps = -r{toxinidir}/test-requirements.txt
-whitelist_externals =
-   bash
-   cp
-   mkdir
-   rm
-   rsync
-   sed
-
-[testenv:venv]
-commands = {posargs}
-
-[testenv:checkniceness]
-commands =
-   doc8 doc
-
-[testenv:checkbuild]
-commands =
-   # Build and copy RST Guides
-   {toxinidir}/tools/build-all-rst.sh
-   # This only generates the index page
-   openstack-indexpage publish-docs
-
-[testenv:publishdocs]
-# Prepare all documents (except www subdir) so that they can get
-# published on docs.openstack.org with just copying publish-docs/*
-# over.
-commands =
-   # Build and copy RST Guides
-   {toxinidir}/tools/build-all-rst.sh
-
-[testenv:checklang]
-whitelist_externals =
-   doc-tools-check-languages
-   bash
-commands =
-   doc-tools-check-languages doc-tools-check-languages.conf test all
-   # Check that .po and .pot files are valid:
-   bash -c "find doc -type f -regex '.*\.pot?' -print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null"
-
-[testenv:buildlang]
-# Run as "tox -e buildlang -- $LANG"
-whitelist_externals = doc-tools-check-languages
-commands = doc-tools-check-languages doc-tools-check-languages.conf test {posargs}
-
-[testenv:publishlang]
-whitelist_externals = doc-tools-check-languages
-commands = doc-tools-check-languages doc-tools-check-languages.conf publish all
-
-[testenv:generatepot-rst]
-# Generate POT files for translation, needs {posargs} like:
-# tox -e generatepot-rst -- user-guide
-commands = {toxinidir}/tools/generatepot-rst.sh {posargs}
-
-[testenv:docs]
-commands =
-   {toxinidir}/tools/build-all-rst.sh
-
-[doc8]
-# Settings for doc8:
-# Ignore target directories
-ignore-path = doc/*/target,doc/common
-# File extensions to use
-extensions = .rst,.txt
-# Disable some doc8 checks:
-# D000: Check RST validity (cannot handle linenos directive)
-ignore = D000
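
For reference, the ``[doc8]`` settings above correspond roughly to the
following direct invocation outside of tox. This is a sketch only; it assumes
doc8's repeatable ``--ignore``, ``--ignore-path``, and ``--extension``
command-line options, which mirror the configuration keys used in ``tox.ini``.

.. code-block:: console

   $ # Sketch of an equivalent standalone doc8 run (assumed flags)
   $ doc8 --ignore D000 \
       --ignore-path 'doc/*/target' --ignore-path doc/common \
       --extension .rst --extension .txt \
       doc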