From ad5004e927d2e0585a2f584ce5cca233c25848cf Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Mon, 12 Oct 2015 09:37:09 +0200 Subject: [PATCH] Delete XML HA Guide Only publish the RST HA Guide. Publish it under docs.openstack.org/ha-guide, so not as draft anymore. Change-Id: I4afef0100d21aaa7215ac7f1ee49fdd4c3e7a4e3 --- README.rst | 37 +- doc-tools-check-languages.conf | 12 +- .../api/section_api_pacemaker.xml | 13 - .../api/section_api_vip.xml | 16 - .../api/section_ceilometer_agent_central.xml | 72 - .../api/section_cinder_api.xml | 93 - .../api/section_glance_api.xml | 110 - .../api/section_keystone.xml | 102 - .../api/section_neutron_server.xml | 101 - doc/high-availability-guide/bk-ha-guide.xml | 111 - doc/high-availability-guide/ch_api.xml | 21 - doc/high-availability-guide/ch_controller.xml | 15 - .../ch_ha_aa_controllers.xml | 22 - doc/high-availability-guide/ch_ha_aa_db.xml | 33 - .../ch_ha_aa_haproxy.xml | 185 -- .../ch_ha_aa_network.xml | 37 - .../ch_ha_aa_rabbitmq.xml | 25 - doc/high-availability-guide/ch_intro.xml | 127 - doc/high-availability-guide/ch_network.xml | 23 - doc/high-availability-guide/ch_pacemaker.xml | 31 - .../controller/section_mysql.xml | 229 -- .../controller/section_rabbitmq.xml | 215 -- .../figures/Check_mark_23x20_02.svg | 60 - doc/high-availability-guide/figures/README | 1 - .../ha_aa_controllers/section_memcached.xml | 28 - ...ction_run_openstack_api_and_schedulers.xml | 74 - .../ha_aa_db/section_mariadb_galera_rh.xml | 152 - .../ha_aa_db/section_mysql_galera.xml | 195 -- .../section_run_neutron_dhcp_agent.xml | 16 - .../section_run_neutron_l3_agent.xml | 82 - .../section_run_neutron_lbaas_agent.xml | 13 - .../section_run_neutron_metadata_agent.xml | 13 - ...ure_openstack_services_to_use_rabbitmq.xml | 64 - .../section_configure_rabbitmq.xml | 61 - .../section_install_rabbitmq.xml | 68 - .../includes/corosync.conf | 67 - .../includes/mysql.res | 11 - .../includes/pacemaker-api.crm | 2 - .../includes/pacemaker-api_vip.crm | 3 - .../pacemaker-ceilometer_agent_central.crm | 3 - .../includes/pacemaker-cinder_api.crm | 4 - .../includes/pacemaker-glance_api.crm | 3 - .../includes/pacemaker-keystone.crm | 3 - .../includes/pacemaker-mysql.crm | 33 - .../includes/pacemaker-network-dhcp.crm | 4 - .../includes/pacemaker-network-l3.crm | 4 - .../includes/pacemaker-network-metadata.crm | 4 - .../includes/pacemaker-network.crm | 2 - .../includes/pacemaker-neutron_server.crm | 4 - .../includes/pacemaker-properties.crm | 5 - .../includes/pacemaker-rabbitmq.crm | 27 - .../includes/rabbitmq.res | 11 - .../locale/high-availability-guide.pot | 2008 ------------- doc/high-availability-guide/locale/ja.po | 1602 ---------- doc/high-availability-guide/locale/zh_CN.po | 2647 ----------------- ...on_highly_available_neutron_dhcp_agent.xml | 45 - ...tion_highly_available_neutron_l3_agent.xml | 49 - ...ighly_available_neutron_metadata_agent.xml | 41 - .../section_manage_network_resources.xml | 15 - doc/high-availability-guide/openstack.ent | 25 - .../pacemaker/section_install_packages.xml | 44 - .../section_set_basic_cluster_properties.xml | 54 - .../pacemaker/section_set_up_corosync.xml | 397 --- .../pacemaker/section_start_pacemaker.xml | 48 - .../pacemaker/section_starting_corosync.xml | 62 - .../part_active_active.xml | 13 - .../part_active_passive.xml | 12 - doc/high-availability-guide/pom.xml | 78 - doc/pom.xml | 46 - tools/build-all-rst.sh | 2 +- tox.ini | 15 +- 71 files changed, 18 insertions(+), 9832 deletions(-) delete mode 100644 
doc/high-availability-guide/api/section_api_pacemaker.xml delete mode 100644 doc/high-availability-guide/api/section_api_vip.xml delete mode 100644 doc/high-availability-guide/api/section_ceilometer_agent_central.xml delete mode 100644 doc/high-availability-guide/api/section_cinder_api.xml delete mode 100644 doc/high-availability-guide/api/section_glance_api.xml delete mode 100644 doc/high-availability-guide/api/section_keystone.xml delete mode 100644 doc/high-availability-guide/api/section_neutron_server.xml delete mode 100644 doc/high-availability-guide/bk-ha-guide.xml delete mode 100644 doc/high-availability-guide/ch_api.xml delete mode 100644 doc/high-availability-guide/ch_controller.xml delete mode 100644 doc/high-availability-guide/ch_ha_aa_controllers.xml delete mode 100644 doc/high-availability-guide/ch_ha_aa_db.xml delete mode 100644 doc/high-availability-guide/ch_ha_aa_haproxy.xml delete mode 100644 doc/high-availability-guide/ch_ha_aa_network.xml delete mode 100644 doc/high-availability-guide/ch_ha_aa_rabbitmq.xml delete mode 100644 doc/high-availability-guide/ch_intro.xml delete mode 100644 doc/high-availability-guide/ch_network.xml delete mode 100644 doc/high-availability-guide/ch_pacemaker.xml delete mode 100644 doc/high-availability-guide/controller/section_mysql.xml delete mode 100644 doc/high-availability-guide/controller/section_rabbitmq.xml delete mode 100644 doc/high-availability-guide/figures/Check_mark_23x20_02.svg delete mode 100644 doc/high-availability-guide/figures/README delete mode 100644 doc/high-availability-guide/ha_aa_controllers/section_memcached.xml delete mode 100644 doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml delete mode 100644 doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml delete mode 100644 doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml delete mode 100644 doc/high-availability-guide/ha_aa_network/section_run_neutron_dhcp_agent.xml delete mode 100644 doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml delete mode 100644 doc/high-availability-guide/ha_aa_network/section_run_neutron_lbaas_agent.xml delete mode 100644 doc/high-availability-guide/ha_aa_network/section_run_neutron_metadata_agent.xml delete mode 100644 doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml delete mode 100644 doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml delete mode 100644 doc/high-availability-guide/ha_aa_rabbitmq/section_install_rabbitmq.xml delete mode 100644 doc/high-availability-guide/includes/corosync.conf delete mode 100644 doc/high-availability-guide/includes/mysql.res delete mode 100644 doc/high-availability-guide/includes/pacemaker-api.crm delete mode 100644 doc/high-availability-guide/includes/pacemaker-api_vip.crm delete mode 100644 doc/high-availability-guide/includes/pacemaker-ceilometer_agent_central.crm delete mode 100644 doc/high-availability-guide/includes/pacemaker-cinder_api.crm delete mode 100644 doc/high-availability-guide/includes/pacemaker-glance_api.crm delete mode 100644 doc/high-availability-guide/includes/pacemaker-keystone.crm delete mode 100644 doc/high-availability-guide/includes/pacemaker-mysql.crm delete mode 100644 doc/high-availability-guide/includes/pacemaker-network-dhcp.crm delete mode 100644 doc/high-availability-guide/includes/pacemaker-network-l3.crm delete mode 100644 doc/high-availability-guide/includes/pacemaker-network-metadata.crm delete mode 100644 
doc/high-availability-guide/includes/pacemaker-network.crm delete mode 100644 doc/high-availability-guide/includes/pacemaker-neutron_server.crm delete mode 100644 doc/high-availability-guide/includes/pacemaker-properties.crm delete mode 100644 doc/high-availability-guide/includes/pacemaker-rabbitmq.crm delete mode 100644 doc/high-availability-guide/includes/rabbitmq.res delete mode 100644 doc/high-availability-guide/locale/high-availability-guide.pot delete mode 100644 doc/high-availability-guide/locale/ja.po delete mode 100644 doc/high-availability-guide/locale/zh_CN.po delete mode 100644 doc/high-availability-guide/network/section_highly_available_neutron_dhcp_agent.xml delete mode 100644 doc/high-availability-guide/network/section_highly_available_neutron_l3_agent.xml delete mode 100644 doc/high-availability-guide/network/section_highly_available_neutron_metadata_agent.xml delete mode 100644 doc/high-availability-guide/network/section_manage_network_resources.xml delete mode 100644 doc/high-availability-guide/openstack.ent delete mode 100644 doc/high-availability-guide/pacemaker/section_install_packages.xml delete mode 100644 doc/high-availability-guide/pacemaker/section_set_basic_cluster_properties.xml delete mode 100644 doc/high-availability-guide/pacemaker/section_set_up_corosync.xml delete mode 100644 doc/high-availability-guide/pacemaker/section_start_pacemaker.xml delete mode 100644 doc/high-availability-guide/pacemaker/section_starting_corosync.xml delete mode 100644 doc/high-availability-guide/part_active_active.xml delete mode 100644 doc/high-availability-guide/part_active_passive.xml delete mode 100644 doc/high-availability-guide/pom.xml delete mode 100644 doc/pom.xml diff --git a/README.rst b/README.rst index d2204df4..f3290183 100644 --- a/README.rst +++ b/README.rst @@ -6,44 +6,13 @@ This repository contains the OpenStack High Availability Guide. For more details, see the `OpenStack Documentation wiki page `_. -Prerequisites -============= - -`Apache Maven `_ must be installed to build the -documentation. - -To install Maven 3 for Ubuntu 12.04 and later, and Debian wheezy and later:: - - apt-get install maven - -On Fedora 20 and later:: - - yum install maven - -On openSUSE 13.1 and later:: - - zypper ar http://download.opensuse.org/repositories/devel:/tools:/building/openSUSE_13.1/devel:tools:building.repo - zypper install maven - Building ======== The root directory of the *OpenStack High Availability Guide* -is ``doc/high-availability-guide``. +is ``doc/ha-guide``. -To build the guide, move into the directory ``doc/high-availability-guide``, -then run the ``mvn`` command in that directory:: - - cd doc/high-availability-guide/ - mvn clean generate-sources - -The generated PDF documentation file is:: - - doc/high-availability-guide/target/docbkx/webhelp/high-availability-guide/high-availability-guide.pdf - -The root of the generated HTML documentation is:: - - doc/high-availability-guide/target/docbkx/webhelp/high-availability-guide/index.html +To build the guide, run ``tox -e docs``. Testing of changes and building of the manual ============================================= @@ -55,8 +24,6 @@ gating jobs. 
If you like to run individual tests, run: * ``tox -e checkniceness`` - to run the niceness tests - * ``tox -e checksyntax`` - to run syntax checks - * ``tox -e checkdeletions`` - to check that no deleted files are referenced * ``tox -e checkbuild`` - to actually build the manual tox will use the openstack-doc-tools package for execution of these diff --git a/doc-tools-check-languages.conf b/doc-tools-check-languages.conf index d2fbac6a..683d1779 100644 --- a/doc-tools-check-languages.conf +++ b/doc-tools-check-languages.conf @@ -2,14 +2,18 @@ # directories to be set up declare -A DIRECTORIES=( - ["ja"]="high-availability-guide glossary" - ["zh_CN"]="high-availability-guide glossary" ) # books to be built declare -A BOOKS=( - ["ja"]="high-availability-guide" - ["zh_CN"]="high-availability-guide" + # No translations currently + # Example how to enable: + #["ja"]="ha-guide" +) + +# draft books +declare -A DRAFTS=( + ["ja"]="ha-guide" ) # Where does the top-level pom live? diff --git a/doc/high-availability-guide/api/section_api_pacemaker.xml b/doc/high-availability-guide/api/section_api_pacemaker.xml deleted file mode 100644 index 3864b91e..00000000 --- a/doc/high-availability-guide/api/section_api_pacemaker.xml +++ /dev/null @@ -1,13 +0,0 @@ - -
- Configure Pacemaker group - Finally, we need to create a service group - to ensure that the virtual IP is linked to the API service - resources: - group g_services_api p_api-ip p_keystone p_glance-api p_cinder-api \ - p_neutron-server p_glance-registry p_ceilometer-agent-central -
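A Pacemaker group also implies ordering and colocation: its members are started in the listed order on the same node, so the API services always follow the virtual IP. You can review the resulting definition with, for example:
# crm configure show g_services_api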
diff --git a/doc/high-availability-guide/api/section_api_vip.xml b/doc/high-availability-guide/api/section_api_vip.xml deleted file mode 100644 index 0d4dc5cb..00000000 --- a/doc/high-availability-guide/api/section_api_vip.xml +++ /dev/null @@ -1,16 +0,0 @@ - -
- Configure the VIP - First, you must select and assign a virtual IP address (VIP) - that can freely float between cluster nodes. - This configuration creates p_api-ip, a - virtual IP address for use by the API node - (192.168.42.103): - primitive p_api-ip ocf:heartbeat:IPaddr2 \ - params ip="192.168.42.103" cidr_netmask="24" \ - op monitor interval="30s" -
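Once Pacemaker has started the resource, you can verify on which node the address is active; for example:
# crm_mon -1 | grep p_api-ip
# ip addr show | grep 192.168.42.103
Run the second command on the node reported by crm_mon; it should list 192.168.42.103 on one of that node's interfaces.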
diff --git a/doc/high-availability-guide/api/section_ceilometer_agent_central.xml b/doc/high-availability-guide/api/section_ceilometer_agent_central.xml deleted file mode 100644 index 4f8b4a4e..00000000 --- a/doc/high-availability-guide/api/section_ceilometer_agent_central.xml +++ /dev/null @@ -1,72 +0,0 @@ - -
- Highly available Telemetry central agent - Telemetry (ceilometer) is the metering and monitoring service in - OpenStack. The central agent polls for resource utilization - statistics for resources not tied to instances or compute nodes. - - Due to the limitations of the polling model, only a single instance of this agent - may poll a given list of meters, unless workload partitioning has - been configured across multiple central agents. In this setup, we also install - this service on the API nodes in active / passive mode. - - Making the Telemetry central agent service highly available in - active / passive mode involves managing its daemon with the Pacemaker - cluster manager. - - The process to install the Telemetry central agent is described in the - installation documentation. -
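If you prefer to run several central agents in parallel instead, workload partitioning is configured through a tooz coordination backend rather than through Pacemaker. As a purely illustrative sketch (the Redis address below is a placeholder, and the option layout assumes a release that supports central agent partitioning), the relevant ceilometer.conf section would look like:
[coordination]
backend_url = redis://192.168.42.120:6379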
- Add the Telemetry central agent resource to Pacemaker - First of all, you need to download the resource agent to your system: - # cd /usr/lib/ocf/resource.d/openstack -# wget https://raw.github.com/madkiss/openstack-resource-agents/master/ocf/ceilometer-agent-central -# chmod a+rx * - You may then proceed with adding the Pacemaker configuration for - the Telemetry central agent resource. Connect to the Pacemaker cluster - with crm configure, and add the following cluster - resources: - primitive p_ceilometer-agent-central \ -ocf:openstack:ceilometer-agent-central \ -params config="/etc/ceilometer/ceilometer.conf" \ -op monitor interval="30s" timeout="30s" - This configuration creates - - - p_ceilometer-agent-central, a resource for managing the - Ceilometer Central Agent service - - - crm configure supports batch input, so you may copy - and paste the above into your live pacemaker configuration, and then make - changes as required. - Once completed, commit your configuration changes by entering - commit from the crm configure menu. - Pacemaker will then start the Ceilometer Central Agent service, and its - dependent resources, on one of your nodes. -
-
- Configure Telemetry central agent service - Edit /etc/ceilometer/ceilometer.conf: - # We use API VIP for Identity Service connection: -os_auth_url=http://192.168.42.103:5000/v2.0 - -# We send notifications to High Available RabbitMQ: -notifier_strategy = rabbit -rabbit_host = 192.168.42.102 - -[database] -# We have to use MySQL connection to store data: -sql_connection=mysql://ceilometer:password@192.168.42.101/ceilometer -# Alternatively, you can switch to pymysql, -# a new Python 3 compatible library and use -# sql_connection=mysql+pymysql://ceilometer:password@192.168.42.101/ceilometer -# and be ready when everything moves to Python 3. -# Ref: https://wiki.openstack.org/wiki/PyMySQL_evaluation -
-
diff --git a/doc/high-availability-guide/api/section_cinder_api.xml b/doc/high-availability-guide/api/section_cinder_api.xml deleted file mode 100644 index 335c3359..00000000 --- a/doc/high-availability-guide/api/section_cinder_api.xml +++ /dev/null @@ -1,93 +0,0 @@ - -
- Highly available Block Storage API - Making the Block Storage (cinder) API service highly available in - active / passive mode involves: - - - Configuring Block Storage to listen on the VIP address - - - Managing Block Storage API daemon with the Pacemaker cluster - manager - - - Configuring OpenStack services to use this IP address - - - - Here is the - documentation - for installing Block Storage service. - -
- Add Block Storage API resource to Pacemaker - First of all, you need to download the resource agent to your - system: - # cd /usr/lib/ocf/resource.d/openstack -# wget https://raw.github.com/madkiss/openstack-resource-agents/master/ocf/cinder-api -# chmod a+rx * - You can now add the Pacemaker configuration for the Block Storage API - resource. Connect to the Pacemaker cluster with crm configure, - and add the following cluster resources: - primitive p_cinder-api ocf:openstack:cinder-api \ -params config="/etc/cinder/cinder.conf" os_password="secretsecret" os_username="admin" \ -os_tenant_name="admin" keystone_get_token_url="http://192.168.42.103:5000/v2.0/tokens" \ -op monitor interval="30s" timeout="30s" - This configuration creates - - - p_cinder-api, a resource for managing the Block - Storage API service - - - crm configure supports batch input, so you may - copy and paste the above into your live pacemaker configuration, and - then make changes as required. For example, you may enter - edit p_cinder-api from the crm configure - menu and edit the resource to match your preferred virtual IP address. - Once completed, commit your configuration changes by entering - commit from the crm configure menu. - Pacemaker will then start the Block Storage API service, and its dependent - resources, on one of your nodes. -
-
- Configure Block Storage API service - Edit /etc/cinder/cinder.conf: - # We have to use MySQL connection to store data: -sql_connection=mysql://cinder:password@192.168.42.101/cinder -# Alternatively, you can switch to pymysql, -# a new Python 3 compatible library and use -# sql_connection=mysql+pymysql://cinder:password@192.168.42.101/cinder -# and be ready when everything moves to Python 3. -# Ref: https://wiki.openstack.org/wiki/PyMySQL_evaluation - -# We bind Block Storage API to the VIP: -osapi_volume_listen = 192.168.42.103 - -# We send notifications to High Available RabbitMQ: -notifier_strategy = rabbit -rabbit_host = 192.168.42.102 -
-
- Configure OpenStack services to use highly available Block - Storage API - Your OpenStack services must now point their Block Storage API - configuration to the highly available, virtual cluster IP address — rather - than a Block Storage API server’s physical IP address as you normally - would. - You must create the Block Storage API endpoint with this IP. - - If you are using both private and public IP addresses, you should create - two Virtual IP addresses and define your endpoint like this: - - $ keystone endpoint-create --region $KEYSTONE_REGION \ ---service-id $service-id --publicurl 'http://PUBLIC_VIP:8776/v1/%(tenant_id)s' \ ---adminurl 'http://192.168.42.103:8776/v1/%(tenant_id)s' \ ---internalurl 'http://192.168.42.103:8776/v1/%(tenant_id)s' -
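If you do not have the Block Storage service ID at hand, you can look it up first; for example, with the keystone client of that era (the awk filter simply extracts the ID column of the table output):
$ keystone service-list | awk '/ volume / {print $2}'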
-
diff --git a/doc/high-availability-guide/api/section_glance_api.xml b/doc/high-availability-guide/api/section_glance_api.xml deleted file mode 100644 index 63162ca0..00000000 --- a/doc/high-availability-guide/api/section_glance_api.xml +++ /dev/null @@ -1,110 +0,0 @@ - -
- Highly available OpenStack Image API - The OpenStack Image service offers a service for discovering, registering, - and retrieving virtual machine images. To make the OpenStack Image API - service highly available in active / passive mode, you must: - - - Configure the OpenStack Image service to listen on the VIP address. - - - Manage the OpenStack Image API daemon with the Pacemaker cluster - manager. - - - Configure OpenStack services to use this IP address. - - - - Here is the documentation - for installing the OpenStack Image API service. - -
- Add OpenStack Image API resource to Pacemaker - First of all, you need to download the resource agent to your - system: - # cd /usr/lib/ocf/resource.d/openstack -# wget https://raw.github.com/madkiss/openstack-resource-agents/master/ocf/glance-api -# chmod a+rx * - You can now add the Pacemaker configuration for the OpenStack Image API - resource. Connect to the Pacemaker cluster with crm - configure, and add the following cluster resources: - primitive p_glance-api ocf:openstack:glance-api \ -params config="/etc/glance/glance-api.conf" os_password="secretsecret" \ -os_username="admin" os_tenant_name="admin" os_auth_url="http://192.168.42.103:5000/v2.0/" \ -op monitor interval="30s" timeout="30s" - This configuration creates - - - p_glance-api, a resource for managing - the OpenStack Image API service - - - crm configure supports batch input, so you may - copy and paste the above into your live Pacemaker configuration, and - then make changes as required. For example, you may enter edit - p_glance-api from the crm configure - menu and edit the resource to match your preferred virtual IP address. - Once completed, commit your configuration changes by entering - commit from the crm configure menu. - Pacemaker will then start the OpenStack Image API service, and its - dependent resources, on one of your nodes. -
-
- Configure OpenStack Image service API - Edit /etc/glance/glance-api.conf: - # We have to use MySQL connection to store data: -sql_connection=mysql://glance:password@192.168.42.101/glance -# Alternatively, you can switch to pymysql, -# a new Python 3 compatible library and use -# sql_connection=mysql+pymysql://glance:password@192.168.42.101/glance -# and be ready when everything moves to Python 3. -# Ref: https://wiki.openstack.org/wiki/PyMySQL_evaluation - -# We bind OpenStack Image API to the VIP: -bind_host = 192.168.42.103 - -# Connect to OpenStack Image registry service: -registry_host = 192.168.42.103 - -# We send notifications to High Available RabbitMQ: -notifier_strategy = rabbit -rabbit_host = 192.168.42.102 -
-
- Configure OpenStack services to use highly available OpenStack - Image API - Your OpenStack services must now point their OpenStack Image API - configuration to the highly available, virtual cluster IP address — rather - than an OpenStack Image API server’s physical IP address as you normally - would. - For OpenStack Compute, for example, if your OpenStack Image API - service IP address is 192.168.42.103 as in the - configuration explained here, you would use the following - configuration in your nova.conf file: - [glance] -... -api_servers = 192.168.42.103 -... - - In versions prior to Juno, this option was called - glance_api_servers in the - [DEFAULT] section. - - You must also create the OpenStack Image API endpoint with this - IP. - - If you are using both private and public IP addresses, you should - create two Virtual IP addresses and define your endpoint like this: - - $ keystone endpoint-create --region $KEYSTONE_REGION \ ---service-id $service-id --publicurl 'http://PUBLIC_VIP:9292' \ ---adminurl 'http://192.168.42.103:9292' \ ---internalurl 'http://192.168.42.103:9292' -
-
diff --git a/doc/high-availability-guide/api/section_keystone.xml b/doc/high-availability-guide/api/section_keystone.xml deleted file mode 100644 index 65e265e0..00000000 --- a/doc/high-availability-guide/api/section_keystone.xml +++ /dev/null @@ -1,102 +0,0 @@ - -
- Highly available OpenStack Identity - OpenStack Identity is the Identity Service in OpenStack and is used by - many services. Making the OpenStack Identity service highly available in - active / passive mode involves: - - - Configuring OpenStack Identity to listen on the VIP address, - - - Managing the OpenStack Identity daemon with the Pacemaker cluster - manager, - - - Configuring OpenStack services to use this IP address. - - - - Here is the documentation - for installing the OpenStack Identity service. -
- Add OpenStack Identity resource to Pacemaker - First of all, you need to download the resource agent to your - system: - # cd /usr/lib/ocf/resource.d -# mkdir openstack -# cd openstack -# wget https://raw.github.com/madkiss/openstack-resource-agents/master/ocf/keystone -# chmod a+rx * - You can now add the Pacemaker configuration for the OpenStack Identity - resource. Connect to the Pacemaker cluster with crm configure, - and add the following cluster resources: - primitive p_keystone ocf:openstack:keystone \ -params config="/etc/keystone/keystone.conf" os_password="secretsecret" \ -os_username="admin" os_tenant_name="admin" os_auth_url="http://192.168.42.103:5000/v2.0/" \ -op monitor interval="30s" timeout="30s" - This configuration creates p_keystone, a resource - for managing the OpenStack Identity service. - crm configure supports batch input, so you may - copy and paste the above into your live pacemaker configuration, and - then make changes as required. For example, you may enter - edit p_keystone from the crm configure - menu and edit the resource to match your preferred virtual IP address. - Once completed, commit your configuration changes by entering - commit from the crm configure menu. - Pacemaker will then start the OpenStack Identity service, and its dependent - resources, on one of your nodes. -
-
- Configure OpenStack Identity service - You need to edit your OpenStack Identity configuration file - (keystone.conf) and change the bind - parameters: - On Havana: - bind_host = 192.168.42.103 - The admin_bind_host option lets you use a private - network for admin access. - public_bind_host = 192.168.42.103 -admin_bind_host = 192.168.42.103 - To ensure that all data is highly available, make sure - that you store everything in the MySQL database (which is also highly - available): - [catalog] -driver = keystone.catalog.backends.sql.Catalog -... -[identity] -driver = keystone.identity.backends.sql.Identity -... -
-
- Configure OpenStack services to use the highly available OpenStack - Identity - Your OpenStack services must now point their OpenStack Identity - configuration to the highly available, virtual cluster IP address — rather - than an OpenStack Identity server’s physical IP address as you normally - would. - For example, with OpenStack Compute, if your OpenStack Identity - service IP address is 192.168.42.103 as in the - configuration explained here, you would use the following line in your - API configuration file (api-paste.ini): - auth_host = 192.168.42.103 - You also need to create the OpenStack Identity endpoint with this - IP. - - If you are using both private and public IP addresses, you should - create two Virtual IP addresses and define your endpoint like this: - $ keystone endpoint-create --region $KEYSTONE_REGION \ ---service-id $service-id --publicurl 'http://PUBLIC_VIP:5000/v2.0' \ ---adminurl 'http://192.168.42.103:35357/v2.0' \ ---internalurl 'http://192.168.42.103:5000/v2.0' - - If you are using the horizon dashboard, you should edit the - local_settings.py file: - OPENSTACK_HOST = 192.168.42.103 -
-
diff --git a/doc/high-availability-guide/api/section_neutron_server.xml b/doc/high-availability-guide/api/section_neutron_server.xml deleted file mode 100644 index fc415619..00000000 --- a/doc/high-availability-guide/api/section_neutron_server.xml +++ /dev/null @@ -1,101 +0,0 @@ - -
- Highly available OpenStack Networking server - OpenStack Networking is the network connectivity service in OpenStack. - Making the OpenStack Networking Server service highly available in - active / passive mode involves the following tasks: - - - Configure OpenStack Networking to listen on the virtual - IP address, - - - Manage the OpenStack Networking API Server daemon with the - Pacemaker cluster manager, - - - Configure OpenStack services to use the virtual IP address. - - - - Here is the documentation - for installing OpenStack Networking service. - -
- Add OpenStack Networking Server resource to Pacemaker - First of all, you need to download the resource agent to your - system: - # cd /usr/lib/ocf/resource.d/openstack -# wget https://raw.github.com/madkiss/openstack-resource-agents/master/ocf/neutron-server -# chmod a+rx * - You can now add the Pacemaker configuration for - the OpenStack Networking Server resource. Connect to the Pacemaker cluster - with crm configure, and add the following cluster - resources: - primitive p_neutron-server ocf:openstack:neutron-server \ -params os_password="secretsecret" os_username="admin" os_tenant_name="admin" \ -keystone_get_token_url="http://192.168.42.103:5000/v2.0/tokens" \ -op monitor interval="30s" timeout="30s" - This configuration creates p_neutron-server, - a resource for managing the OpenStack Networking Server service. - crm configure supports batch input, so you may - copy and paste the above into your live pacemaker configuration, and - then make changes as required. For example, you may enter - edit p_neutron-server from the - crm configure menu and edit the resource to match - your preferred virtual IP address. - Once completed, commit your configuration changes by entering - commit from the crm configure - menu. Pacemaker will then start the OpenStack Networking API - service, and its dependent resources, on one of your nodes. -
-
- Configure OpenStack Networking server - Edit /etc/neutron/neutron.conf: - # We bind OpenStack Networking Server to the VIP: -bind_host = 192.168.42.103 - -# We send notifications to Highly available RabbitMQ: -notifier_strategy = rabbit -rabbit_host = 192.168.42.102 - -[database] -# We have to use MySQL connection to store data: -connection = mysql://neutron:password@192.168.42.101/neutron -# Alternatively, you can switch to pymysql, -# a new Python 3 compatible library and use -# connection=mysql+pymysql://neutron:password@192.168.42.101/neutron -# and be ready when everything moves to Python 3. -# Ref: https://wiki.openstack.org/wiki/PyMySQL_evaluation -
-
- Configure OpenStack services to use highly available OpenStack - Networking server - Your OpenStack services must now point their OpenStack Networking - Server configuration to the highly available, virtual cluster IP - address — rather than an OpenStack Networking server’s physical IP - address as you normally would. - For example, to configure OpenStack Compute to use the highly - available OpenStack Networking server, edit the - nova.conf file: - neutron_url = http://192.168.42.103:9696 - You need to create the OpenStack Networking server endpoint with - this IP. - - If you are using both private and public IP addresses, you should - create two Virtual IP addresses and define your endpoint like - this: - - $ keystone endpoint-create --region $KEYSTONE_REGION --service-id $service-id \ ---publicurl 'http://PUBLIC_VIP:9696/' \ ---adminurl 'http://192.168.42.103:9696/' \ ---internalurl 'http://192.168.42.103:9696/' -
-
diff --git a/doc/high-availability-guide/bk-ha-guide.xml b/doc/high-availability-guide/bk-ha-guide.xml deleted file mode 100644 index 57da1697..00000000 --- a/doc/high-availability-guide/bk-ha-guide.xml +++ /dev/null @@ -1,111 +0,0 @@ - - - OpenStack High Availability Guide - - - - - - - - OpenStack Contributors - - - - 2012 - 2013 - 2014 - OpenStack Contributors - - current - OpenStack - - - - Copyright details are filled in by the template. - - - - This guide describes how to install, configure, and manage - OpenStack for high availability. - - - - 2015-04-30 - - - - This guide has various updates for the Kilo - release, such as adding MariaDB, updates to the - MySQL information, corosync and networking - updates. - - - - - - 2014-10-17 - - - - This guide has gone through editorial changes to - follow the OpenStack documentation - conventions. Various smaller issues have been - fixed. - - - - - - 2014-05-16 - - - - Conversion to DocBook. - - - - - - 2014-04-17 - - - - Minor cleanup of typos, otherwise no major revisions for - Icehouse release. - - - - - - 2012-01-16 - - - - Organizes guide based on cloud controller and compute nodes. - - - - - - 2012-05-24 - - - - Begin trunk designation. - - - - - - - - - - - - diff --git a/doc/high-availability-guide/ch_api.xml b/doc/high-availability-guide/ch_api.xml deleted file mode 100644 index 6d71b2bc..00000000 --- a/doc/high-availability-guide/ch_api.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - - API node cluster stack - - The API node exposes OpenStack API endpoints onto external network (Internet). -It must talk to the cloud controller on the management network. - - - - - - - - - - diff --git a/doc/high-availability-guide/ch_controller.xml b/doc/high-availability-guide/ch_controller.xml deleted file mode 100644 index 96e3754b..00000000 --- a/doc/high-availability-guide/ch_controller.xml +++ /dev/null @@ -1,15 +0,0 @@ - - - - Cloud controller cluster stack - - The cloud controller runs on the management network and must talk to all other services. - - - - - diff --git a/doc/high-availability-guide/ch_ha_aa_controllers.xml b/doc/high-availability-guide/ch_ha_aa_controllers.xml deleted file mode 100644 index 98681706..00000000 --- a/doc/high-availability-guide/ch_ha_aa_controllers.xml +++ /dev/null @@ -1,22 +0,0 @@ - - - OpenStack controller nodes - OpenStack controller nodes contain: - - - All OpenStack API services - - - All OpenStack schedulers - - - Memcached service - - - - - diff --git a/doc/high-availability-guide/ch_ha_aa_db.xml b/doc/high-availability-guide/ch_ha_aa_db.xml deleted file mode 100644 index d1ce225f..00000000 --- a/doc/high-availability-guide/ch_ha_aa_db.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - Database - The first step is installing the database that sits at the heart of - the cluster. When we talk about high availability (HA), we talk about several - databases (for redundancy) and a means to keep them synchronized. In - this case, we choose the MySQL database, - along with Galera plug-in for synchronous multi-master - replication. - The Galera Cluster plug-in is a multi-master Cluster based on - synchronous replication. It is a high availability solution, which provides - high system uptime, no data loss, and scalability - for growth. - - The choice of database is not a foregone conclusion; you are not - required to use MySQL. It is, however, a - fairly common choice in OpenStack installations, so we will cover it - here. - - - MySQL with Galera is by no means the only way to achieve database HA. 
- MariaDB Galera Cluster (https://mariadb.org/) - and Percona XtraDB Cluster (http://www.percona.com/) - also work with Galera. You also have the option to use PostgreSQL, which - has its own replication, or another database HA option. - - - - diff --git a/doc/high-availability-guide/ch_ha_aa_haproxy.xml b/doc/high-availability-guide/ch_ha_aa_haproxy.xml deleted file mode 100644 index 2dc8ef79..00000000 --- a/doc/high-availability-guide/ch_ha_aa_haproxy.xml +++ /dev/null @@ -1,185 +0,0 @@ - - - - HAProxy nodes - - HAProxy is a very fast and reliable solution offering high availability, load balancing, and proxying -for TCP and HTTP-based applications. It is particularly suited for web sites crawling under very high loads -while needing persistence or Layer 7 processing. Supporting tens of thousands of connections is clearly -realistic with today’s hardware. - - For installing HAProxy on your nodes, you should consider its - official documentation. - And also, you have to consider that HAProxy should not be a single point - of failure so you need to ensure its availability by other means, - such as Pacemaker or Keepalived. It is advisable to have multiple HAProxy - instances running, where the number of these instances is a small odd - number like 3 or 5. Also it is a common practice to collocate HAProxy - instances with existing OpenStack controllers. - - Here is an example for the HAProxy configuration file: - global - chroot /var/lib/haproxy - daemon - group haproxy - maxconn 4000 - pidfile /var/run/haproxy.pid - user haproxy - -defaults - log global - maxconn 4000 - option redispatch - retries 3 - timeout http-request 10s - timeout queue 1m - timeout connect 10s - timeout client 1m - timeout server 1m - timeout check 10s - -listen dashboard_cluster - bind <Virtual IP>:443 - balance source - option tcpka - option httpchk - option tcplog - server controller1 10.0.0.1:443 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.2:443 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.3:443 check inter 2000 rise 2 fall 5 - -listen galera_cluster - bind <Virtual IP>:3306 - balance source - option httpchk - server controller1 10.0.0.4:3306 check port 9200 inter 2000 rise 2 fall 5 - server controller2 10.0.0.5:3306 backup check port 9200 inter 2000 rise 2 fall 5 - server controller3 10.0.0.6:3306 backup check port 9200 inter 2000 rise 2 fall 5 - -listen glance_api_cluster - bind <Virtual IP>:9292 - balance source - option tcpka - option httpchk - option tcplog - server controller1 10.0.0.1:9292 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.2:9292 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.3:9292 check inter 2000 rise 2 fall 5 - -listen glance_registry_cluster - bind <Virtual IP>:9191 - balance source - option tcpka - option tcplog - server controller1 10.0.0.1:9191 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.2:9191 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.3:9191 check inter 2000 rise 2 fall 5 - -listen keystone_admin_cluster - bind <Virtual IP>:35357 - balance source - option tcpka - option httpchk - option tcplog - server controller1 10.0.0.1:35357 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.2:35357 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.3:35357 check inter 2000 rise 2 fall 5 - -listen keystone_public_internal_cluster - bind <Virtual IP>:5000 - balance source - option tcpka - option httpchk - option tcplog - server controller1 10.0.0.1:5000 check inter 2000 rise 2 fall 5 - server 
controller2 10.0.0.2:5000 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.3:5000 check inter 2000 rise 2 fall 5 - -listen nova_ec2_api_cluster - bind <Virtual IP>:8773 - balance source - option tcpka - option tcplog - server controller1 10.0.0.1:8773 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.2:8773 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.3:8773 check inter 2000 rise 2 fall 5 - -listen nova_compute_api_cluster - bind <Virtual IP>:8774 - balance source - option tcpka - option httpchk - option tcplog - server controller1 10.0.0.1:8774 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.2:8774 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.3:8774 check inter 2000 rise 2 fall 5 - -listen nova_metadata_api_cluster - bind <Virtual IP>:8775 - balance source - option tcpka - option tcplog - server controller1 10.0.0.1:8775 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.2:8775 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.3:8775 check inter 2000 rise 2 fall 5 - -listen cinder_api_cluster - bind <Virtual IP>:8776 - balance source - option tcpka - option httpchk - option tcplog - server controller1 10.0.0.1:8776 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.2:8776 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.3:8776 check inter 2000 rise 2 fall 5 - -listen ceilometer_api_cluster - bind <Virtual IP>:8777 - balance source - option tcpka - option httpchk - option tcplog - server controller1 10.0.0.1:8777 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.2:8777 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.3:8777 check inter 2000 rise 2 fall 5 - -listen spice_cluster - bind <Virtual IP>:6080 - balance source - option tcpka - option tcplog - server controller1 10.0.0.1:6080 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.2:6080 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.3:6080 check inter 2000 rise 2 fall 5 - -listen neutron_api_cluster - bind <Virtual IP>:9696 - balance source - option tcpka - option httpchk - option tcplog - server controller1 10.0.0.1:9696 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.2:9696 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.3:9696 check inter 2000 rise 2 fall 5 - -listen swift_proxy_cluster - bind <Virtual IP>:8080 - balance source - option tcplog - option tcpka - server controller1 10.0.0.1:8080 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.2:8080 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.3:8080 check inter 2000 rise 2 fall 5 - After each change of this file, you should restart HAProxy. - - Note that the Galera cluster configuration commands indicate - two of the three controllers are backup. That should be done to ensure - only one node serves write requests because OpenStack support for - multi-node writes is not production-ready yet. - - diff --git a/doc/high-availability-guide/ch_ha_aa_network.xml b/doc/high-availability-guide/ch_ha_aa_network.xml deleted file mode 100644 index 59b40298..00000000 --- a/doc/high-availability-guide/ch_ha_aa_network.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - OpenStack network nodes - OpenStack network nodes contain: - - - Neutron DHCP agent - - - Neutron L2 agent - - - Neutron L3 agent - - - Neutron metadata agent - - - Neutron LBaaS agent - - - - The neutron L2 agent does not need to be highly available. 
It has to - be installed on each data forwarding node and controls the virtual - networking drivers as Open vSwitch or Linux Bridge. One L2 agent runs per - node and controls its virtual interfaces. That's why it cannot be - distributed and highly available. - - - - - - diff --git a/doc/high-availability-guide/ch_ha_aa_rabbitmq.xml b/doc/high-availability-guide/ch_ha_aa_rabbitmq.xml deleted file mode 100644 index 1c509f7a..00000000 --- a/doc/high-availability-guide/ch_ha_aa_rabbitmq.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - RabbitMQ - RabbitMQ is the default AMQP server used by many OpenStack services. - Making the RabbitMQ service highly available involves the following - steps: - - - Install RabbitMQ - - - Configure RabbitMQ for HA queues - - - Configure OpenStack services to use Rabbit HA queues - - - - - - diff --git a/doc/high-availability-guide/ch_intro.xml b/doc/high-availability-guide/ch_intro.xml deleted file mode 100644 index 4fd9ead2..00000000 --- a/doc/high-availability-guide/ch_intro.xml +++ /dev/null @@ -1,127 +0,0 @@ - - - Introduction to OpenStack High Availability - High Availability systems seek to minimize two things: - - - System downtime - Occurs when a user-facing service is unavailable - beyond a specified maximum amount of time. - - - Data loss - Accidental deletion or destruction of - data. - - - Most high availability systems guarantee protection against system - downtime and data loss only in the event of a single failure. However, - they are also expected to protect against cascading failures, where a - single failure deteriorates into a series of consequential failures. - A crucial aspect of high availability is the elimination of single - points of failure (SPOFs). A SPOF is an individual piece of equipment or - software which will cause system downtime or data loss if it fails. - In order to eliminate SPOFs, check that mechanisms exist for redundancy - of: - - - Network components, such as switches and routers - - - Applications and automatic service migration - - - Storage components - - - Facility services such as power, air conditioning, and fire - protection - - - In the event that a component fails and a back-up system must take on - its load, most high availability systems will replace the failed - component as quickly as possible to maintain necessary redundancy. This - way time spent in a degraded protection state is minimized. - Most high availability systems will fail in the event of multiple - independent (non-consequential) failures. In this case, most systems will - protect data over maintaining availability. - High-availability systems typically achieve an uptime percentage of - 99.99% or more, which roughly equates to less than an hour of cumulative - downtime per year. In order to achieve this, high availability systems - should keep recovery times after a failure to about one to two minutes, - sometimes significantly less. - OpenStack currently meets such availability requirements for its own - infrastructure services, meaning that an uptime of 99.99% is feasible for - the OpenStack infrastructure proper. However, OpenStack does not guarantee - 99.99% availability for individual guest instances. - Preventing single points of failure can depend on whether or not a - service is stateless. -
- Stateless vs. Stateful services - A stateless service is one that provides a response after your - request, and then requires no further attention. To make a stateless - service highly available, you need to provide redundant instances and - load balance them. OpenStack services that are stateless include - nova-api, - nova-conductor, - glance-api, - keystone-api, - neutron-api and - nova-scheduler. - A stateful service is one where subsequent requests to the service - depend on the results of the first request. Stateful services are more - difficult to manage because a single action typically involves more than - one request, so simply providing additional instances and load balancing - will not solve the problem. For example, if the Horizon user interface - reset itself every time you went to a new page, it wouldn't be very - useful. OpenStack services that are stateful include the OpenStack - database and message queue. - Making stateful services highly available can depend on whether you - choose an active/passive or active/active configuration. -
-
- Active/Passive - In an active/passive configuration, systems are set up to bring - additional resources online to replace those that have failed. For - example, OpenStack would write to the main database while maintaining a - disaster recovery database that can be brought online in the event that - the main database fails. - Typically, an active/passive installation for a stateless service - would maintain a redundant instance that can be brought online when - required. Requests may be handled using a virtual IP address to - facilitate return to service with minimal reconfiguration - required. - A typical active/passive installation for a stateful service - maintains a replacement resource that can be brought online when - required. A separate application (such as Pacemaker or Corosync) monitors - these services, bringing the backup online as necessary. -
-
- Active/Active - In an active/active configuration, systems also use a backup but will - manage both the main and redundant systems concurrently. This way, if - there is a failure the user is unlikely to notice. The backup system is - already online, and takes on increased load while the main system is - fixed and brought back online. - Typically, an active/active installation for a stateless service - would maintain a redundant instance, and requests are load balanced using - a virtual IP address and a load balancer such as HAProxy. - A typical active/active installation for a stateful service would - include redundant services with all instances having an identical state. - For example, updates to one instance of a database would also update all - other instances. This way a request to one instance is the same as a - request to any other. A load balancer manages the traffic to these - systems, ensuring that operational systems always handle the - request. - These are some of the more common ways to implement these high - availability architectures, but they are by no means the only ways to do - it. The important thing is to make sure that your services are redundant, - and available; how you achieve that is up to you. This document will - cover some of the more common options for highly available - systems. -
-
diff --git a/doc/high-availability-guide/ch_network.xml b/doc/high-availability-guide/ch_network.xml deleted file mode 100644 index 0e265a1d..00000000 --- a/doc/high-availability-guide/ch_network.xml +++ /dev/null @@ -1,23 +0,0 @@ - - - - Network controller cluster stack - - The network controller sits on the management and data network, and needs to be connected to the Internet if an instance will need access to the Internet. - - Pacemaker requires that both nodes have different hostnames. Because of that, RA scripts could require some adjustments since the Networking - scheduler will be aware of one node, for example a virtual router attached to a single L3 node. For example, both nodes could set different hostnames - in the configuration files, and when the l3-agent started by Pacemaker, the node's hostname will be changed to network-controller automatically. - Whichever node starts the l3-agent will have the same hostname. - - - - - - - - diff --git a/doc/high-availability-guide/ch_pacemaker.xml b/doc/high-availability-guide/ch_pacemaker.xml deleted file mode 100644 index f8344683..00000000 --- a/doc/high-availability-guide/ch_pacemaker.xml +++ /dev/null @@ -1,31 +0,0 @@ - - - The Pacemaker cluster stack - OpenStack infrastructure high availability relies on the - Pacemaker cluster - stack, the state-of-the-art high availability and load balancing stack - for the Linux platform. Pacemaker is storage and application-agnostic, - and is in no way specific to OpenStack. - Pacemaker relies on the - Corosync messaging - layer for reliable cluster communications. Corosync implements the - Totem single-ring ordering and membership protocol. It also provides - UDP and InfiniBand based messaging, quorum, and cluster membership to - Pacemaker. - Pacemaker interacts with applications through resource agents - (RAs), of which it supports over 70 natively. Pacemaker can also - easily use third-party RAs. An OpenStack high-availability - configuration uses existing native Pacemaker RAs (such as those - managing MySQL databases or virtual IP addresses), existing third-party - RAs (such as for RabbitMQ), and native OpenStack RAs (such as those - managing the OpenStack Identity and Image services). - - - - - - diff --git a/doc/high-availability-guide/controller/section_mysql.xml b/doc/high-availability-guide/controller/section_mysql.xml deleted file mode 100644 index 2d575fda..00000000 --- a/doc/high-availability-guide/controller/section_mysql.xml +++ /dev/null @@ -1,229 +0,0 @@ - - -%openstack; -]> -
- Highly available MySQL - MySQL is the default database server used by many OpenStack - services. Making the MySQL service highly available involves: - - - Configuring a DRBD device for use by MySQL - - - Configuring MySQL to use a data directory residing on that DRBD - device - - - Selecting and assigning a virtual IP address (VIP) that can freely - float between cluster nodes - - - Configuring MySQL to listen on that IP address - - - Managing all resources, including the MySQL daemon itself, with - the Pacemaker cluster manager - - - - - MySQL/Galera is - an alternative method of configuring MySQL for high availability. It is - likely to become the preferred method of achieving MySQL high - availability once it has sufficiently matured. At the time of writing, - however, the Pacemaker/DRBD based approach remains the recommended one - for OpenStack environments. - -
- Configure DRBD - The Pacemaker based MySQL server requires a DRBD resource from - which it mounts the /var/lib/mysql directory. In this - example, the DRBD resource is simply named mysql: - - <literal>mysql</literal> DRBD resource configuration - (<filename>/etc/drbd.d/mysql.res</filename>) - - resource mysql { - device minor 0; - disk "/dev/data/mysql"; - meta-disk internal; - on node1 { - address ipv4 10.0.42.100:7700; - } - on node2 { - address ipv4 10.0.42.254:7700; - } -} - - - This resource uses an underlying local disk (in DRBD terminology, a - backing device) named /dev/data/mysql - on both cluster nodes, node1 and node2. - Normally, this would be an LVM Logical Volume specifically set aside for - this purpose. The DRBD meta-disk is internal, meaning DRBD-specific metadata - is being stored at the end of the disk device itself. The - device is configured to communicate between IPv4 addresses - 10.0.42.100 and 10.0.42.254, using - TCP port 7700. Once enabled, it will map to a local DRBD block device - with the device minor number 0, that is, /dev/drbd0. - Enabling a DRBD resource is explained in detail in - - the DRBD User's Guide. In brief, the proper sequence of commands - is this: - # drbdadm create-md mysql -# drbdadm up mysql -# drbdadm -- --force primary mysql - - - Initializes DRBD metadata and writes the initial set of metadata - to /dev/data/mysql. Must be completed on both - nodes. - - - Creates the /dev/drbd0 device node, - attaches the DRBD device to its backing store, and - connects the DRBD node to its peer. Must be - completed on both nodes. - - - Kicks off the initial device synchronization, and puts the device - into the primary (readable and writable) role. See - - Resource roles (from the DRBD User's Guide) for a more - detailed description of the primary and secondary roles in DRBD. Must - be completed on one node only, namely the one - where you are about to continue with creating your filesystem. - - -
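While the initial synchronization runs, you can follow its progress; for example, on DRBD 8.x the connection and disk states are exposed through /proc/drbd:
# watch -n1 cat /proc/drbd
Wait until the resource reports a disk state of UpToDate/UpToDate before relying on it for failover.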
-
- Creating a file system - Once the DRBD resource is running and in the primary role (and - potentially still in the process of running the initial device - synchronization), you may proceed with creating the filesystem for - MySQL data. XFS is generally the recommended filesystem due to its - journaling, efficient allocation, and performance: - # mkfs -t xfs /dev/drbd0 - You may also use the alternate device path for the DRBD device, which - may be easier to remember as it includes the self-explanatory resource - name: - # mkfs -t xfs /dev/drbd/by-res/mysql - Once completed, you may safely return the device to the secondary - role. Any ongoing device synchronization will continue in the - background: - # drbdadm secondary mysql -
-
- Prepare MySQL for Pacemaker high availability - In order for Pacemaker monitoring to function properly, you must - ensure that MySQL's database files reside on the DRBD device. If you - already have an existing MySQL database, the simplest approach is to - just move the contents of the existing /var/lib/mysql - directory into the newly created filesystem on the DRBD device. - - You must complete the next step while the MySQL database server is - shut down. - - # mount /dev/drbd/by-res/mysql /mnt -# mv /var/lib/mysql/* /mnt -# umount /mnt - For a new MySQL installation with no existing data, you may also run - the mysql_install_db command: - # mount /dev/drbd/by-res/mysql /mnt -# mysql_install_db --datadir=/mnt -# umount /mnt - Regardless of the approach, the steps outlined here must be completed - on only one cluster node. -
-
- Add MySQL resources to Pacemaker - You can now add the Pacemaker configuration for - MySQL resources. Connect to the Pacemaker cluster with crm - configure, and add the following cluster resources: - primitive p_ip_mysql ocf:heartbeat:IPaddr2 \ - params ip="192.168.42.101" cidr_netmask="24" \ - op monitor interval="30s" -primitive p_drbd_mysql ocf:linbit:drbd \ - params drbd_resource="mysql" \ - op start timeout="90s" \ - op stop timeout="180s" \ - op promote timeout="180s" \ - op demote timeout="180s" \ - op monitor interval="30s" role="Slave" \ - op monitor interval="29s" role="Master" -primitive p_fs_mysql ocf:heartbeat:Filesystem \ - params device="/dev/drbd/by-res/mysql" \ - directory="/var/lib/mysql" \ - fstype="xfs" \ - options="relatime" \ - op start timeout="60s" \ - op stop timeout="180s" \ - op monitor interval="60s" timeout="60s" -primitive p_mysql ocf:heartbeat:mysql \ - params additional_parameters="--bind-address=192.168.42.101" \ - config="/etc/mysql/my.cnf" \ - pid="/var/run/mysqld/mysqld.pid" \ - socket="/var/run/mysqld/mysqld.sock" \ - log="/var/log/mysql/mysqld.log" \ - op monitor interval="20s" timeout="10s" \ - op start timeout="120s" \ - op stop timeout="120s" -group g_mysql p_ip_mysql p_fs_mysql p_mysql -ms ms_drbd_mysql p_drbd_mysql \ - meta notify="true" clone-max="2" -colocation c_mysql_on_drbd inf: g_mysql ms_drbd_mysql:Master -order o_drbd_before_mysql inf: ms_drbd_mysql:promote g_mysql:start - This configuration creates - - - p_ip_mysql, a virtual IP address for use by MySQL - (192.168.42.101), - - - p_fs_mysql, a Pacemaker managed filesystem - mounted to /var/lib/mysql on whatever node - currently runs the MySQL service, - - - ms_drbd_mysql, the master/slave set - managing the mysql DRBD resource, - - - a service group and order - and colocation constraints to ensure resources are - started on the correct nodes, and in the correct sequence. - - - crm configure supports batch input, so you may - copy and paste the above into your live pacemaker configuration, and then - make changes as required. For example, you may enter edit p_ip_mysql - from the crm configure menu and edit the resource to - match your preferred virtual IP address. - Once completed, commit your configuration changes by entering - commit from the crm configure menu. - Pacemaker will then start the MySQL service, and its dependent resources, - on one of your nodes. -
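Because of the colocation and order constraints, the MySQL service can only run on the node where the DRBD resource is in the Master role. Once the resources have started, you can confirm the placement; for example:
# crm_mon -1
The output should show ms_drbd_mysql with one Master and the g_mysql group (virtual IP, filesystem, and MySQL daemon) running on that same node.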
-
- Configure OpenStack services for highly available MySQL - Your OpenStack services must now point their MySQL configuration to - the highly available, virtual cluster IP address—rather than a - MySQL server's physical IP address as you normally would. - For OpenStack Image, for example, if your MySQL service IP address is - 192.168.42.101 as in the configuration explained here, - you would use the following line in your OpenStack Image registry - configuration file (glance-registry.conf): - sql_connection = mysql://glancedbadmin:<password>@192.168.42.101/glance - No other changes are necessary to your OpenStack configuration. If the - node currently hosting your database experiences a problem - necessitating service failover, your OpenStack services may experience - a brief MySQL interruption, as they would in the event of a network - hiccup, and then continue to run normally. -
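The same change applies to every other OpenStack service that talks to MySQL. As a hypothetical illustration (database name, user, and option layout depend on your release and deployment), OpenStack Compute would point at the same virtual IP in nova.conf:
[database]
connection = mysql://nova:<password>@192.168.42.101/nova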
-
diff --git a/doc/high-availability-guide/controller/section_rabbitmq.xml b/doc/high-availability-guide/controller/section_rabbitmq.xml deleted file mode 100644 index 52023e77..00000000 --- a/doc/high-availability-guide/controller/section_rabbitmq.xml +++ /dev/null @@ -1,215 +0,0 @@ - - -%openstack; -]> -
- Highly available RabbitMQ - RabbitMQ is the default AMQP server used by many OpenStack - services. Making the RabbitMQ service highly available involves: - - - configuring a DRBD device for use by RabbitMQ, - - - configuring RabbitMQ to use a data directory residing on - that DRBD device, - - - selecting and assigning a virtual IP address (VIP) that can freely - float between cluster nodes, - - - configuring RabbitMQ to listen on that IP address, - - - managing all resources, including the RabbitMQ daemon itself, with - the Pacemaker cluster manager. - - - - Active-active mirrored queues - is another method for configuring RabbitMQ versions 3.3.0 and later - for high availability. You can also manage a RabbitMQ cluster with - active-active mirrored queues using the Pacemaker cluster manager. - -
- Configure DRBD - The Pacemaker based RabbitMQ server requires a DRBD resource from - which it mounts the /var/lib/rabbitmq directory. - In this example, the DRBD resource is simply named - rabbitmq: - - <literal>rabbitmq</literal> DRBD resource configuration - (<filename>/etc/drbd.d/rabbitmq.res</filename>) - - resource rabbitmq { - device minor 1; - disk "/dev/data/rabbitmq"; - meta-disk internal; - on node1 { - address ipv4 10.0.42.100:7701; - } - on node2 { - address ipv4 10.0.42.254:7701; - } -} - - - This resource uses an underlying local disk (in DRBD terminology, a - backing device) named /dev/data/rabbitmq on both - cluster nodes, node1 and node2. - Normally, this would be an LVM Logical Volume specifically set aside for - this purpose. The DRBD meta-disk is internal, meaning DRBD-specific - metadata is being stored at the end of the disk device itself. The device - is configured to communicate between IPv4 addresses - 10.0.42.100 and 10.0.42.254, - using TCP port 7701. Once enabled, it will map to a local DRBD block - device with the device minor number 1, that is, - /dev/drbd1. - Enabling a DRBD resource is explained in detail in - the DRBD - User's Guide. In brief, the proper sequence of commands is this: - # drbdadm create-md rabbitmq -# drbdadm up rabbitmq -# drbdadm -- --force primary rabbitmq - - - Initializes DRBD metadata and writes the initial set of - metadata to /dev/data/rabbitmq. Must be - completed on both nodes. - - - Creates the /dev/drbd1 device node, - attaches the DRBD device to its backing store, and connects - the DRBD node to its peer. Must be completed on both nodes. - - - Kicks off the initial device synchronization, and puts the - device into the primary (readable and writable) - role. See - Resource roles (from the DRBD User's Guide) for a more - detailed description of the primary and secondary roles in DRBD. - Must be completed on one node only, namely the one where you - are about to continue with creating your filesystem. - - -
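While the initial synchronization is running you can watch its progress; on the DRBD 8.x releases this guide targets, the kernel exposes the resource state in /proc/drbd (an illustrative check, not part of the original procedure):
# cat /proc/drbd
The rabbitmq resource should report cs:SyncSource (or cs:Connected once synchronization has finished) and eventually ds:UpToDate/UpToDate.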
-
- Create a file system - Once the DRBD resource is running and in the primary role (and - potentially still in the process of running the initial device - synchronization), you may proceed with creating the filesystem for - RabbitMQ data. XFS is generally the recommended filesystem: - # mkfs -t xfs /dev/drbd1 - You may also use the alternate device path for the DRBD device, - which may be easier to remember as it includes the self-explanatory - resource name: - # mkfs -t xfs /dev/drbd/by-res/rabbitmq - Once completed, you may safely return the device to the secondary - role. Any ongoing device synchronization will continue in the - background: - # drbdadm secondary rabbitmq -
-
- Prepare RabbitMQ for Pacemaker high availability - In order for Pacemaker monitoring to function properly, you must - ensure that RabbitMQ's .erlang.cookie files - are identical on all nodes, regardless of whether DRBD is mounted - there or not. The simplest way of doing so is to take an existing - .erlang.cookie from one of your nodes, copy - it to the RabbitMQ data directory on the other node, and also - copy it to the DRBD-backed filesystem: - # scp -p /var/lib/rabbitmq/.erlang.cookie node2:/var/lib/rabbitmq/ -# mount /dev/drbd/by-res/rabbitmq /mnt -# cp -a /var/lib/rabbitmq/.erlang.cookie /mnt -# umount /mnt -
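If you copy the cookie around as root, also make sure it ends up owned by the rabbitmq user and readable only by that user, as the active/active RabbitMQ section of this guide does (paths shown are the Debian/Ubuntu defaults):
# chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie
# chmod 400 /var/lib/rabbitmq/.erlang.cookie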
-
- Add RabbitMQ resources to Pacemaker - You may now proceed with adding the Pacemaker configuration for - RabbitMQ resources. Connect to the Pacemaker cluster with - crm configure, and add the following cluster - resources: - primitive p_ip_rabbitmq ocf:heartbeat:IPaddr2 \ - params ip="192.168.42.100" cidr_netmask="24" \ - op monitor interval="10s" -primitive p_drbd_rabbitmq ocf:linbit:drbd \ - params drbd_resource="rabbitmq" \ - op start timeout="90s" \ - op stop timeout="180s" \ - op promote timeout="180s" \ - op demote timeout="180s" \ - op monitor interval="30s" role="Slave" \ - op monitor interval="29s" role="Master" -primitive p_fs_rabbitmq ocf:heartbeat:Filesystem \ - params device="/dev/drbd/by-res/rabbitmq" \ - directory="/var/lib/rabbitmq" \ - fstype="xfs" options="relatime" \ - op start timeout="60s" \ - op stop timeout="180s" \ - op monitor interval="60s" timeout="60s" -primitive p_rabbitmq ocf:rabbitmq:rabbitmq-server \ - params nodename="rabbit@localhost" \ - mnesia_base="/var/lib/rabbitmq" \ - op monitor interval="20s" timeout="10s" -group g_rabbitmq p_ip_rabbitmq p_fs_rabbitmq p_rabbitmq -ms ms_drbd_rabbitmq p_drbd_rabbitmq \ - meta notify="true" master-max="1" clone-max="2" -colocation c_rabbitmq_on_drbd inf: g_rabbitmq ms_drbd_rabbitmq:Master -order o_drbd_before_rabbitmq inf: ms_drbd_rabbitmq:promote g_rabbitmq:start - This configuration creates - - - p_ip_rabbitmq, a virtual IP address for - use by RabbitMQ (192.168.42.100), - - - p_fs_rabbitmq, a Pacemaker managed - filesystem mounted to /var/lib/rabbitmq - on whatever node currently runs the RabbitMQ service, - - - ms_drbd_rabbitmq, the master/slave set - managing the rabbitmq DRBD resource, - - - a service group and order and colocation constraints to - ensure resources are started on the correct nodes, and in the - correct sequence. - - - crm configure supports batch input, so you - may copy and paste the above into your live pacemaker configuration, - and then make changes as required. For example, you may enter - edit p_ip_rabbitmq from the - crm configure menu and edit the resource to - match your preferred virtual IP address. - Once completed, commit your configuration changes by entering - commit from the crm configure - menu. Pacemaker will then start the RabbitMQ service, and its - dependent resources, on one of your nodes. -
-
- Configure OpenStack services for highly available RabbitMQ - Your OpenStack services must now point their RabbitMQ - configuration to the highly available, virtual cluster IP - address—rather than a RabbitMQ server's physical IP address - as you normally would. - For OpenStack Image, for example, if your RabbitMQ service - IP address is 192.168.42.100 as in the - configuration explained here, you would use the following line - in your OpenStack Image API configuration file - (glance-api.conf): - rabbit_host = 192.168.42.100 - No other changes are necessary to your OpenStack configuration. - If the node currently hosting your RabbitMQ experiences a problem - necessitating service failover, your OpenStack services may - experience a brief RabbitMQ interruption, as they would in the - event of a network hiccup, and then continue to run normally. -
-
diff --git a/doc/high-availability-guide/figures/Check_mark_23x20_02.svg b/doc/high-availability-guide/figures/Check_mark_23x20_02.svg deleted file mode 100644 index 3051a2f9..00000000 --- a/doc/high-availability-guide/figures/Check_mark_23x20_02.svg +++ /dev/null @@ -1,60 +0,0 @@ - - - - - - - - - image/svg+xml - - - - - - - - diff --git a/doc/high-availability-guide/figures/README b/doc/high-availability-guide/figures/README deleted file mode 100644 index 4bad6d19..00000000 --- a/doc/high-availability-guide/figures/README +++ /dev/null @@ -1 +0,0 @@ -Place any figures and illustrations in this directory. diff --git a/doc/high-availability-guide/ha_aa_controllers/section_memcached.xml b/doc/high-availability-guide/ha_aa_controllers/section_memcached.xml deleted file mode 100644 index 53819845..00000000 --- a/doc/high-availability-guide/ha_aa_controllers/section_memcached.xml +++ /dev/null @@ -1,28 +0,0 @@ - -
- Memcached - Most OpenStack services can use Memcached to - store ephemeral data, such as tokens. Although Memcached - does not support typical forms of redundancy, such as clustering, OpenStack - services can use almost any number of instances by configuring multiple - hostnames or IP addresses. The Memcached - client implements hashing to balance objects among the instances. Failure of - an instance only impacts a percentage of the objects and the client automatically - removes it from the list of instances. - To install and configure it, read the - - official documentation. - Memory caching is managed by oslo-incubator, so the way to use - multiple memcached servers is the same for all projects. - Example configuration with two hosts: - memcached_servers = controller1:11211,controller2:11211 - By default, controller1 handles the caching service - but if the host goes down, controller2 does the job. For - more information about Memcached installation, - see the - OpenStack Cloud Administrator Guide. -
diff --git a/doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml b/doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml deleted file mode 100644 index 291fc4c8..00000000 --- a/doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml +++ /dev/null @@ -1,74 +0,0 @@ - -
- Run OpenStack API and schedulers -
- API services - All OpenStack projects have an API service for controlling all the - resources in the cloud. In active/active mode, the most common setup is - to scale out these services on at least two nodes and to use load balancing - and a virtual IP address (with HAProxy and - Keepalived in this setup). - To use highly available and scalable API services, we need to - ensure that: - - - You use virtual IP addresses when configuring OpenStack Identity - endpoints. - - - - All OpenStack configuration files refer to virtual - IP addresses. - - - - The monitor check is quite simple since it just establishes a TCP - connection to the API port. Compared to the active/passive mode using - Corosync and resource agents, - this check does not verify whether the service is actually running. That is why all - OpenStack API services should be monitored by another tool, for example - Nagios. -
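To make the TCP health check described above concrete, a minimal HAProxy front end for a single API service could look like the following sketch (service name, ports, backend IP addresses, and server names are purely illustrative; the full example configuration is in the HAProxy chapter of this guide):
listen keystone_api
  bind 192.168.42.103:5000
  balance source
  option tcpka
  server controller1 10.0.0.12:5000 check inter 2000 rise 2 fall 5
  server controller2 10.0.0.13:5000 check inter 2000 rise 2 fall 5
Because no httpchk option is set, HAProxy only verifies that the TCP port accepts connections, which is exactly the limitation the paragraph above points out.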
-
- Schedulers - OpenStack schedulers are used to determine how to dispatch compute, - network, and volume requests. The most common setup is to use RabbitMQ as a - messaging system. Those services are connected to the messaging back end - and can scale out: - - - nova-scheduler - - - nova-conductor - - - cinder-scheduler - - - neutron-server - - - ceilometer-collector - - - heat-engine - - - Please refer to the RabbitMQ section - for configuring these services with multiple messaging servers. -
-
- Telemetry Central agent - The Telemetry Central agent can be configured to partition its - polling workload between multiple agents, enabling high availability. - Please refer to - this section of the OpenStack Cloud Administrator Guide - for the requirements and implementation details of this configuration. -
-
diff --git a/doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml b/doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml deleted file mode 100644 index 44d47e35..00000000 --- a/doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml +++ /dev/null @@ -1,152 +0,0 @@ - -
- MariaDB with Galera (Red Hat-based platforms) - MariaDB with Galera provides synchronous database replication in an - active-active, multi-master environment. High availability for the data - itself is managed internally by Galera, while access availability will be - managed by HAProxy. - This guide assumes that three nodes are used to form the MariaDB - Galera cluster. Unless otherwise specified, all commands need to be - executed on all cluster nodes. - - - To install MariaDB with Galera - - -Red Hat-based distributions include Galera packages in their repositories. To -install the most current version of the packages, run the following command: - - # yum install -y mariadb-galera-server xinetd rsync - - - (Optional) Configure the clustercheck utility. - If HAProxy is used to load-balance client access to MariaDB, - as described in the HAProxy section - of this document, you can use the clustercheck utility - to improve health checks. - - - - Create file etc/sysconfig/clustercheck - with the following contents: - - MYSQL_USERNAME="clustercheck" -MYSQL_PASSWORD=PASSWORD -MYSQL_HOST="localhost" -MYSQL_PORT="3306" - - Make sure a sensible password is used. - - - - Configure monitor service (used by HAProxy): - Create file /etc/xinetd.d/galera-monitor - with the following contents: - service galera-monitor -{ - port = 9200 - disable = no - socket_type = stream - protocol = tcp - wait = no - user = root - group = root - groups = yes - server = /usr/bin/clustercheck - type = UNLISTED - per_source = UNLIMITED - log_on_success = - log_on_failure = HOST - flags = REUSE -} - - - Create the database user required by clustercheck: - # systemctl start mysqld -# mysql -e "CREATE USER 'clustercheck'@'localhost' IDENTIFIED BY 'PASSWORD';" -# systemctl stop mysqld - - - Start xinetd (required by clustercheck): - # systemctl daemon-reload -# systemctl enable xinetd -# systemctl start xinetd - - - - - Configure MariaDB with Galera. 
- - - - Create the Galera configuration file /etc/my.cnf.d/galera.cnf - with the following contents: - - [mysqld] -skip-name-resolve=1 -binlog_format=ROW -default-storage-engine=innodb -innodb_autoinc_lock_mode=2 -innodb_locks_unsafe_for_binlog=1 -max_connections=2048 -query_cache_size=0 -query_cache_type=0 -bind_address=NODE_IP -wsrep_provider=/usr/lib64/galera/libgalera_smm.so -wsrep_cluster_name="galera_cluster" -wsrep_cluster_address="gcomm://PRIMARY_NODE_IP, SECONDARY_NODE_IP, TERTIARY_NODE_IP" -wsrep_slave_threads=1 -wsrep_certify_nonPK=1 -wsrep_max_ws_rows=131072 -wsrep_max_ws_size=1073741824 -wsrep_debug=0 -wsrep_convert_LOCK_to_trx=0 -wsrep_retry_autocommit=1 -wsrep_auto_increment_control=1 -wsrep_drupal_282555_workaround=0 -wsrep_causal_reads=0 -wsrep_notify_cmd= -wsrep_sst_method=rsync - - - Open firewall ports used for MariaDB and Galera communications: - # firewall-cmd --add-service=mysql -# firewall-cmd --add-port=4444/tcp -# firewall-cmd --add-port=4567/tcp -# firewall-cmd --add-port=4568/tcp -# firewall-cmd --add-port=9200/tcp -# firewall-cmd --add-port=9300/tcp -# firewall-cmd --add-service=mysql --permanent -# firewall-cmd --add-port=4444/tcp --permanent -# firewall-cmd --add-port=4567/tcp --permanent -# firewall-cmd --add-port=4568/tcp --permanent -# firewall-cmd --add-port=9200/tcp --permanent -# firewall-cmd --add-port=9300/tcp --permanent - - - Start MariaDB cluster: - - - On node 1: - # sudo -u mysql /usr/libexec/mysqld --wsrep-cluster-address='gcomm://' & - - - On nodes 2 and 3: - # systemctl start mariadb - - - Once the output from clustercheck - is on all nodes, restart MariaDB on node 1: - # kill <mysql PIDs> -# systemctl start mariadb - - - - - - -
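Once MariaDB is running on all three nodes, you can confirm that they actually joined the cluster by querying a Galera status variable on any node (a generic Galera check, not specific to Red Hat platforms):
# mysql -e "SHOW STATUS LIKE 'wsrep_cluster_size';"
The reported value should equal the number of cluster nodes, three in this example.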
diff --git a/doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml b/doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml deleted file mode 100644 index af9cddae..00000000 --- a/doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml +++ /dev/null @@ -1,195 +0,0 @@ - -
- MySQL with Galera - Rather than starting with a vanilla version of MySQL, and then adding - Galera, you will want to install a version of MySQL patched for wsrep - (Write Set REPlication) from https://launchpad.net/codership-mysql. - The wsrep API is suitable for configuring MySQL High Availability in - OpenStack because it supports synchronous replication. - Note that the installation requirements call for careful attention. Read - the guide https://launchpadlibrarian.net/66669857/README-wsrep - to ensure you follow all the required steps. - And for any additional information about Galera, you can access this guide: - http://galeracluster.com/documentation-webpages/gettingstarted.html - - Installing Galera through a MySQL version patched for wsrep: - - -Setup the repository for Ubuntu 14.04 "trusty" (most recent). -Install the software properties, the key, and the repository: - - # apt-get install software-properties-common -# apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xcbcb082a1bb943db -# add-apt-repository 'deb http://ams2.mirrors.digitalocean.com/mariadb/repo/5.5/ubuntu trusty main' - - - -You can change the mirror to one near you on: - downloads.mariadb.org - - - - - -Update your system and install the required packages: -# apt-get update -# apt-get install mariadb-galera-server galera - - - -If you have mariaDB already installed you need to re-apply all the permissions from the installation guide. It will purge all privileges! - - - - - Adjust the configuration: - In the /etc/mysql/my.conf file, make the following changes: - query_cache_size=0 -binlog_format=ROW -default_storage_engine=innodb -innodb_autoinc_lock_mode=2 -innodb_doublewrite=1 - - - - Create the /etc/mysql/conf.d/wsrep.cnf file. - - - Paste the following lines in this file: - [mysqld] -wsrep_provider=/usr/lib/galera/libgalera_smm.so -wsrep_cluster_name="Openstack" -wsrep_sst_auth=wsrep_sst:wspass -wsrep_cluster_address="gcomm://PRIMARY_NODE_IP,SECONDARY_NODE_IP" -wsrep_sst_method=rsync -wsrep_node_address="PRIMARY_NODE_IP" -wsrep_node_name="NODE_NAME" - - - Replace PRIMARY_NODE_IP and - SECONDARY_NODE_IP with the IP addresses - of your primary and secondary servers. - - - Replace PRIMARY_NODE_IP with the hostname of the server. This is set for logging. - - Copy this file to all other databases servers and change the value of wsrep_cluster_address and wsrep_node_name accordingly. - - - Start mysql as root and execute the following queries: - mysql> SET wsrep_on=OFF; GRANT ALL ON *.* TO wsrep_sst@'%' IDENTIFIED BY 'wspass'; - Remove user accounts with empty user names because they cause problems: - mysql> SET wsrep_on=OFF; DELETE FROM mysql.user WHERE user=''; - - - Check that the nodes can access each other through the firewall. - Depending on your environment, this might mean adjusting iptables, as - in: - # iptables --insert RH-Firewall-1-INPUT 1 --proto tcp \ - --source <my IP>/24 --destination <my IP>/32 --dport 3306 \ - -j ACCEPT -# iptables --insert RH-Firewall-1-INPUT 1 --proto tcp \ - --source <my IP>/24 --destination <my IP>/32 --dport 4567 \ - -j ACCEPT - This might also mean configuring any NAT firewall between nodes to - allow direct connections. You might need to disable SELinux, or - configure it to allow mysqld - to listen to sockets at unprivileged ports. - - - - -For the next step create a back-up file of the debian.cnf file in /etc/mysql on all database servers. -Should something go wrong just copy the back-up file back. 
- - # cp debian.cnf debian.cnf.old - - Make sure you have SSH root access on the other servers. From the primary database server, copy the debian.cnf file to all other servers by running the following command: - - # scp /etc/mysql/debian.cnf root@IP-address:/etc/mysql - After the copy make sure that all files are the same, you can do this by using the following command: - # md5sum debian.cnf - From the debian.cnf get the database password: - # cat /etc/mysql/debian.cnf - The result will look like this: - [client] -host = localhost -user = debian-sys-maint -password = FiKiOY1Lw8Sq46If -socket = /var/run/mysqld/mysqld.sock -[mysql_upgrade] -host = localhost -user = debian-sys-maint -password = FiKiOY1Lw8Sq46If -socket = /var/run/mysqld/mysqld.sock -basedir = /usr - - -The below query should be run on every server except the primary node. This will make sure that you can restart the database again. -Do not forget to add the password from the debian.cnf. To do this, run: - - mysql> GRANT SHUTDOWN ON *.* TO ‘debian-sys-maint’@’localhost' IDENTIFIED BY '<debian.cnf password>'; -mysql> GRANT SELECT ON `mysql`.`user` TO ‘debian-sys-maint’@’localhost' IDENTIFIED BY '<debian.cnf password>'; - Stop all the mysql servers and start the first server with the following command: - # service mysql start --wsrep-new-cluster -All other nodes can now be started using: - # service mysql start -Verify the wsrep replication by logging in as root under mysql and running the following command: -mysql> SHOW STATUS LIKE ‘wsrep%’; -+------------------------------+--------------------------------------+ -| Variable_name | Value | -+------------------------------+--------------------------------------+ -| wsrep_local_state_uuid | d6a51a3a-b378-11e4-924b-23b6ec126a13 | -| wsrep_protocol_version | 5 | -| wsrep_last_committed | 202 | -| wsrep_replicated | 201 | -| wsrep_replicated_bytes | 89579 | -| wsrep_repl_keys | 865 | -| wsrep_repl_keys_bytes | 11543 | -| wsrep_repl_data_bytes | 65172 | -| wsrep_repl_other_bytes | 0 | -| wsrep_received | 8 | -| wsrep_received_bytes | 853 | -| wsrep_local_commits | 201 | -| wsrep_local_cert_failures | 0 | -| wsrep_local_replays | 0 | -| wsrep_local_send_queue | 0 | -| wsrep_local_send_queue_avg | 0.000000 | -| wsrep_local_recv_queue | 0 | -| wsrep_local_recv_queue_avg | 0.000000 | -| wsrep_local_cached_downto | 1 | -| wsrep_flow_control_paused_ns | 0 | -| wsrep_flow_control_paused | 0.000000 | -| wsrep_flow_control_sent | 0 | -| wsrep_flow_control_recv | 0 | -| wsrep_cert_deps_distance | 1.029703 | -| wsrep_apply_oooe | 0.024752 | -| wsrep_apply_oool | 0.000000 | -| wsrep_apply_window | 1.024752 | -| wsrep_commit_oooe | 0.000000 | -| wsrep_commit_oool | 0.000000 | -| wsrep_commit_window | 1.000000 | -| wsrep_local_state | 4 | -| wsrep_local_state_comment | Synced | -| wsrep_cert_index_size | 18 | -| wsrep_causal_reads | 0 | -| wsrep_cert_interval | 0.024752 | -| wsrep_incoming_addresses | <first IP>:3306,<second IP>:3306 | -| wsrep_cluster_conf_id | 2 | -| wsrep_cluster_size | 2 | -| wsrep_cluster_state_uuid | d6a51a3a-b378-11e4-924b-23b6ec126a13 | -| wsrep_cluster_status | Primary | -| wsrep_connected | ON | -| wsrep_local_bf_aborts | 0 | -| wsrep_local_index | 1 | -| wsrep_provider_name | Galera | -| wsrep_provider_vendor | Codership Oy <info@codership.com> | -| wsrep_provider_version | 25.3.5-wheezy(rXXXX) | -| wsrep_ready | ON | -| wsrep_thread_count | 2 | -+------------------------------+--------------------------------------+ -
diff --git a/doc/high-availability-guide/ha_aa_network/section_run_neutron_dhcp_agent.xml b/doc/high-availability-guide/ha_aa_network/section_run_neutron_dhcp_agent.xml deleted file mode 100644 index 9bfd3db4..00000000 --- a/doc/high-availability-guide/ha_aa_network/section_run_neutron_dhcp_agent.xml +++ /dev/null @@ -1,16 +0,0 @@ - -
- Run neutron DHCP agent - The OpenStack Networking service has a scheduler that lets you - run multiple agents across nodes. Also, the DHCP agent can be - natively highly available. - You can configure the number of DHCP agents per network using the - dhcp_agents_per_network parameter in - neutron.conf. - By default, this is equal to 1. To achieve high availability, - assign more than one DHCP agent per network. -
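For example, to run two DHCP agents for every network, the corresponding setting in neutron.conf would be (the value 2 is only an illustration; any value greater than 1 provides redundancy):
dhcp_agents_per_network = 2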
diff --git a/doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml b/doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml deleted file mode 100644 index 4f6ee26e..00000000 --- a/doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml +++ /dev/null @@ -1,82 +0,0 @@ - -
- Run neutron L3 agent - The neutron L3 agent is scalable, due to the scheduler - that allows distribution of virtual routers across multiple nodes. - The following options are available to make a router highly - available: - - - Automatic L3 agent failover for routers, via the - allow_automatic_l3agent_failover configuration - option in /etc/neutron/neutron.conf. - - - Use Layer 3 High Availability with VRRP. The following - configuration options need to be set in /etc/neutron/neutron.conf - to enable it (a combined example snippet follows this list): - - Option / Value to set / Description - - l3_ha / True / All routers will be highly available by default. - - max_l3_agents_per_router / 2 / Maximum number of network nodes to be used for the HA - router. The value can be larger than 2 but needs to be at least 2. - - min_l3_agents_per_router / 2 / Minimum number of network nodes to be used for the HA - router. A new router creation will fail unless at least this many network - nodes are available. The value should not be less than 2. - - - Using the active/passive solution to run the neutron L3 agent in - failover mode with Pacemaker. See the - active/passive section - of this guide. -
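Put together, the VRRP-related options from the table above would appear in /etc/neutron/neutron.conf roughly as follows (the agent counts are the example values from the table; adjust them to the number of network nodes you actually run):
l3_ha = True
max_l3_agents_per_router = 2
min_l3_agents_per_router = 2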
diff --git a/doc/high-availability-guide/ha_aa_network/section_run_neutron_lbaas_agent.xml b/doc/high-availability-guide/ha_aa_network/section_run_neutron_lbaas_agent.xml deleted file mode 100644 index a4134e7c..00000000 --- a/doc/high-availability-guide/ha_aa_network/section_run_neutron_lbaas_agent.xml +++ /dev/null @@ -1,13 +0,0 @@ - -
- Run neutron LBaaS agent - Currently, there's no native feature to make the LBaaS agent highly - available using the default plug-in HAProxy. A common way to make - HAProxy highly available is to use the VRRP (Virtual Router Redundancy - Protocol). Unfortunately, this is not yet implemented in the LBaaS - HAProxy plug-in. -
diff --git a/doc/high-availability-guide/ha_aa_network/section_run_neutron_metadata_agent.xml b/doc/high-availability-guide/ha_aa_network/section_run_neutron_metadata_agent.xml deleted file mode 100644 index a5f5a4a1..00000000 --- a/doc/high-availability-guide/ha_aa_network/section_run_neutron_metadata_agent.xml +++ /dev/null @@ -1,13 +0,0 @@ - -
- Run neutron metadata agent - There is no native feature to make this service highly available. - At this time, you can run the neutron - metadata agent in active/passive failover mode with Pacemaker. - See the active/passive - section of this guide. -
diff --git a/doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml b/doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml deleted file mode 100644 index e6689241..00000000 --- a/doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml +++ /dev/null @@ -1,64 +0,0 @@ - -
- Configure OpenStack services to use RabbitMQ - We have to configure the OpenStack components to use at least two - RabbitMQ nodes. - Do this configuration on all services using RabbitMQ: - - - RabbitMQ HA cluster host:port pairs: - rabbit_hosts=rabbit1:5672,rabbit2:5672 - - - How frequently to retry connecting with RabbitMQ: - rabbit_retry_interval=1 - - - How long to back-off for between retries when connecting to - RabbitMQ: - rabbit_retry_backoff=2 - - - Maximum retries with trying to connect to RabbitMQ (infinite by - default): - rabbit_max_retries=0 - - - Use durable queues in RabbitMQ: - rabbit_durable_queues=true - - - Use HA queues in RabbitMQ (x-ha-policy: all): - rabbit_ha_queues=true - - - - - If you change the configuration from an old setup which did not use - HA queues, you should interrupt the service: - # rabbitmqctl stop_app -# rabbitmqctl reset -# rabbitmqctl start_app - - - Services currently working with HA queues: - - - OpenStack Compute - - - OpenStack Block Storage - - - OpenStack Networking - - - Telemetry - - - -
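Collected into one snippet, the options listed above would look like this in a service's configuration file (host names and values are the same examples used in the list):
rabbit_hosts=rabbit1:5672,rabbit2:5672
rabbit_retry_interval=1
rabbit_retry_backoff=2
rabbit_max_retries=0
rabbit_durable_queues=true
rabbit_ha_queues=true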
diff --git a/doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml b/doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml deleted file mode 100644 index 840a96b9..00000000 --- a/doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml +++ /dev/null @@ -1,61 +0,0 @@ - -
- Configure RabbitMQ - We are building a cluster of RabbitMQ nodes to construct a - RabbitMQ broker, a logical grouping of several Erlang nodes. - We have to consider that while exchanges and bindings will survive - the loss of individual nodes, queues and their messages will not because a - queue and its contents is located on one node. If we lose this node, we - also lose the queue. - Mirrored queues in RabbitMQ improve the availability - of service since it will be resilient to failures. - We consider that we run (at least) two RabbitMQ servers and we call the - nodes rabbit1 and rabbit2. To build a - broker, we need to ensure that all nodes have the same Erlang cookie - file. - To do so, stop RabbitMQ everywhere and copy the cookie from the first - node to the other node(s): - # scp /var/lib/rabbitmq/.erlang.cookie \ -root@NODE:/var/lib/rabbitmq/.erlang.cookie - On the target nodes ensure the correct owner, group, and permissions of the - .erlang.cookie file: - # chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie -# chmod 400 /var/lib/rabbitmq/.erlang.cookie - Start RabbitMQ on all nodes and verify the nodes are running: - # rabbitmqctl cluster_status -Cluster status of node rabbit@NODE... -[{nodes,[{disc,[rabbit@NODE]}]}, - {running_nodes,[rabbit@NODE]}, - {partitions,[]}] -...done. - Run the following commands on all nodes except the first one: - # rabbitmqctl stop_app -Stopping node rabbit@NODE... -...done. -# rabbitmqctl join_cluster rabbit@rabbit1 -# rabbitmqctl start_app -Starting node rabbit@NODE ... -...done. - To verify the cluster status: - # rabbitmqctl cluster_status -Cluster status of node rabbit@NODE... -[{nodes,[{disc,[rabbit@rabbit1]},{ram,[rabbit@NODE]}]},{running_nodes,[rabbit@NODE,rabbit@rabbit1]}] - If the cluster is working, you can now proceed to creating users and - passwords for queues. - To ensure that all queues, except those with auto-generated names, are - mirrored across all running nodes it is necessary to set the policy key - ha-mode to all. Run the following - command on one of the nodes: - # rabbitmqctl set_policy ha-all '^(?!amq\.).*' '{"ha-mode": "all"}' - - More information about - highly available queues and - - clustering can be found in the official RabbitMQ - documentation. - -
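To confirm that the mirroring policy took effect, you can list the configured policies on any node (rabbitmqctl list_policies is part of the standard RabbitMQ tooling; the exact output columns vary between RabbitMQ releases):
# rabbitmqctl list_policies
The ha-all policy should appear with the pattern and the ha-mode definition set above.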
diff --git a/doc/high-availability-guide/ha_aa_rabbitmq/section_install_rabbitmq.xml b/doc/high-availability-guide/ha_aa_rabbitmq/section_install_rabbitmq.xml deleted file mode 100644 index 09a87e58..00000000 --- a/doc/high-availability-guide/ha_aa_rabbitmq/section_install_rabbitmq.xml +++ /dev/null @@ -1,68 +0,0 @@ - -
- Install RabbitMQ -
- On Ubuntu and Debian - RabbitMQ is packaged on both distros: - # apt-get install rabbitmq-server - - - - Official manual for installing RabbitMQ on Ubuntu and Debian - - -
-
- On Fedora and RHEL - RabbitMQ is packaged on both distros: - # yum install rabbitmq-server - - - - Official manual for installing RabbitMQ on Fedora and RHEL - - -
-
- On openSUSE and SLES - - On openSUSE: - - # zypper install rabbitmq-server - - - - - - Official manual for installing RabbitMQ on openSUSE - - - - - Official manual for installing RabbitMQ on openSUSE - - - On SLES 12: - - # zypper addrepo -f obs://Cloud:OpenStack:Kilo/SLE_12 Kilo - - The packages are signed by GPG key 893A90DAD85F9316. You should - verify the fingerprint of the imported GPG key before using - it. - Key ID: 893A90DAD85F9316 -Key Name: Cloud:OpenStack OBS Project <Cloud:OpenStack@build.opensuse.org> -Key Fingerprint: 35B34E18ABC1076D66D5A86B893A90DAD85F9316 -Key Created: Tue Oct 8 13:34:21 2013 -Key Expires: Thu Dec 17 13:34:21 2015 - - - - # zypper install rabbitmq-server - - -
-
diff --git a/doc/high-availability-guide/includes/corosync.conf b/doc/high-availability-guide/includes/corosync.conf deleted file mode 100644 index 0eb945fc..00000000 --- a/doc/high-availability-guide/includes/corosync.conf +++ /dev/null @@ -1,67 +0,0 @@ -totem { - version: 2 - - # Time (in ms) to wait for a token <1> - token: 10000 - - # How many token retransmits before forming a new - # configuration - token_retransmits_before_loss_const: 10 - - # Turn off the virtual synchrony filter - vsftype: none - - # Enable encryption <2> - secauth: on - - # How many threads to use for encryption/decryption - threads: 0 - - # This specifies the redundant ring protocol, which may be - # none, active, or passive. <3> - rrp_mode: active - - # The following is a two-ring multicast configuration. <4> - interface { - ringnumber: 0 - bindnetaddr: 192.168.42.0 - mcastaddr: 239.255.42.1 - mcastport: 5405 - } - interface { - ringnumber: 1 - bindnetaddr: 10.0.42.0 - mcastaddr: 239.255.42.2 - mcastport: 5405 - } -} - -amf { - mode: disabled -} - -service { - # Load the Pacemaker Cluster Resource Manager <5> - ver: 1 - name: pacemaker -} - -aisexec { - user: root - group: root -} - -logging { - fileline: off - to_stderr: yes - to_logfile: no - to_syslog: yes - syslog_facility: daemon - debug: off - timestamp: on - logger_subsys { - subsys: AMF - debug: off - tags: enter|leave|trace1|trace2|trace3|trace4|trace6 - } -} diff --git a/doc/high-availability-guide/includes/mysql.res b/doc/high-availability-guide/includes/mysql.res deleted file mode 100644 index 3d6d3aed..00000000 --- a/doc/high-availability-guide/includes/mysql.res +++ /dev/null @@ -1,11 +0,0 @@ -resource mysql { - device minor 0; - disk "/dev/data/mysql"; - meta-disk internal; - on node1 { - address ipv4 10.0.42.100:7700; - } - on node2 { - address ipv4 10.0.42.254:7700; - } -} diff --git a/doc/high-availability-guide/includes/pacemaker-api.crm b/doc/high-availability-guide/includes/pacemaker-api.crm deleted file mode 100644 index fce83b8a..00000000 --- a/doc/high-availability-guide/includes/pacemaker-api.crm +++ /dev/null @@ -1,2 +0,0 @@ -group g_services_api p_api-ip p_keystone p_glance-api p_cinder-api \ - p_neutron-server p_glance-registry p_ceilometer-agent-central diff --git a/doc/high-availability-guide/includes/pacemaker-api_vip.crm b/doc/high-availability-guide/includes/pacemaker-api_vip.crm deleted file mode 100644 index 9b22746b..00000000 --- a/doc/high-availability-guide/includes/pacemaker-api_vip.crm +++ /dev/null @@ -1,3 +0,0 @@ -primitive p_api-ip ocf:heartbeat:IPaddr2 \ - params ip="192.168.42.103" cidr_netmask="24" \ - op monitor interval="30s" diff --git a/doc/high-availability-guide/includes/pacemaker-ceilometer_agent_central.crm b/doc/high-availability-guide/includes/pacemaker-ceilometer_agent_central.crm deleted file mode 100644 index c5b88c95..00000000 --- a/doc/high-availability-guide/includes/pacemaker-ceilometer_agent_central.crm +++ /dev/null @@ -1,3 +0,0 @@ -primitive p_ceilometer-agent-central ocf:openstack:ceilometer-agent-central \ - params config="/etc/ceilometer/ceilometer.conf" \ - op monitor interval="30s" timeout="30s" diff --git a/doc/high-availability-guide/includes/pacemaker-cinder_api.crm b/doc/high-availability-guide/includes/pacemaker-cinder_api.crm deleted file mode 100644 index d5300ea5..00000000 --- a/doc/high-availability-guide/includes/pacemaker-cinder_api.crm +++ /dev/null @@ -1,4 +0,0 @@ -primitive p_cinder-api ocf:openstack:cinder-api \ - params config="/etc/cinder/cinder.conf" 
os_password="secretsecret" os_username="admin" \ - os_tenant_name="admin" keystone_get_token_url="http://192.168.42.103:5000/v2.0/tokens" \ - op monitor interval="30s" timeout="30s" diff --git a/doc/high-availability-guide/includes/pacemaker-glance_api.crm b/doc/high-availability-guide/includes/pacemaker-glance_api.crm deleted file mode 100644 index 5387950f..00000000 --- a/doc/high-availability-guide/includes/pacemaker-glance_api.crm +++ /dev/null @@ -1,3 +0,0 @@ -primitive p_glance-api ocf:openstack:glance-api \ - params config="/etc/glance/glance-api.conf" os_password="secretsecret" os_username="admin" os_tenant_name="admin" os_auth_url="http://192.168.42.103:5000/v2.0/" \ - op monitor interval="30s" timeout="30s" diff --git a/doc/high-availability-guide/includes/pacemaker-keystone.crm b/doc/high-availability-guide/includes/pacemaker-keystone.crm deleted file mode 100644 index d0a4febf..00000000 --- a/doc/high-availability-guide/includes/pacemaker-keystone.crm +++ /dev/null @@ -1,3 +0,0 @@ -primitive p_keystone ocf:openstack:keystone \ - params config="/etc/keystone/keystone.conf" os_password="secretsecret" os_username="admin" os_tenant_name="admin" os_auth_url="http://192.168.42.103:5000/v2.0/" \ - op monitor interval="30s" timeout="30s" diff --git a/doc/high-availability-guide/includes/pacemaker-mysql.crm b/doc/high-availability-guide/includes/pacemaker-mysql.crm deleted file mode 100644 index 79b8db11..00000000 --- a/doc/high-availability-guide/includes/pacemaker-mysql.crm +++ /dev/null @@ -1,33 +0,0 @@ -primitive p_ip_mysql ocf:heartbeat:IPaddr2 \ - params ip="192.168.42.101" cidr_netmask="24" \ - op monitor interval="30s" -primitive p_drbd_mysql ocf:linbit:drbd \ - params drbd_resource="mysql" \ - op start timeout="90s" \ - op stop timeout="180s" \ - op promote timeout="180s" \ - op demote timeout="180s" \ - op monitor interval="30s" role="Slave" \ - op monitor interval="29s" role="Master" -primitive p_fs_mysql ocf:heartbeat:Filesystem \ - params device="/dev/drbd/by-res/mysql" \ - directory="/var/lib/mysql" \ - fstype="xfs" \ - options="relatime" \ - op start timeout="60s" \ - op stop timeout="180s" \ - op monitor interval="60s" timeout="60s" -primitive p_mysql ocf:heartbeat:mysql \ - params additional_parameters="--bind-address=50.56.179.138" \ - config="/etc/mysql/my.cnf" \ - pid="/var/run/mysqld/mysqld.pid" \ - socket="/var/run/mysqld/mysqld.sock" \ - log="/var/log/mysql/mysqld.log" \ - op monitor interval="20s" timeout="10s" \ - op start timeout="120s" \ - op stop timeout="120s" -group g_mysql p_ip_mysql p_fs_mysql p_mysql -ms ms_drbd_mysql p_drbd_mysql \ - meta notify="true" clone-max="2" -colocation c_mysql_on_drbd inf: g_mysql ms_drbd_mysql:Master -order o_drbd_before_mysql inf: ms_drbd_mysql:promote g_mysql:start diff --git a/doc/high-availability-guide/includes/pacemaker-network-dhcp.crm b/doc/high-availability-guide/includes/pacemaker-network-dhcp.crm deleted file mode 100644 index 5c18ee85..00000000 --- a/doc/high-availability-guide/includes/pacemaker-network-dhcp.crm +++ /dev/null @@ -1,4 +0,0 @@ -primitive p_neutron-dhcp-agent ocf:openstack:neutron-dhcp-agent \ - params config="/etc/neutron/neutron.conf" \ - plugin_config="/etc/neutron/dhcp_agent.ini" \ - op monitor interval="30s" timeout="30s" diff --git a/doc/high-availability-guide/includes/pacemaker-network-l3.crm b/doc/high-availability-guide/includes/pacemaker-network-l3.crm deleted file mode 100644 index c7a5295d..00000000 --- a/doc/high-availability-guide/includes/pacemaker-network-l3.crm +++ /dev/null @@ -1,4 
+0,0 @@ -primitive p_neutron-l3-agent ocf:openstack:neutron-agent-l3 \ - params config="/etc/neutron/neutron.conf" \ - plugin_config="/etc/neutron/l3_agent.ini" \ - op monitor interval="30s" timeout="30s" diff --git a/doc/high-availability-guide/includes/pacemaker-network-metadata.crm b/doc/high-availability-guide/includes/pacemaker-network-metadata.crm deleted file mode 100644 index 3dfef616..00000000 --- a/doc/high-availability-guide/includes/pacemaker-network-metadata.crm +++ /dev/null @@ -1,4 +0,0 @@ -primitive p_neutron-metadata-agent ocf:openstack:neutron-metadata-agent \ - params config="/etc/neutron/neutron.conf" \ - agent_config="/etc/neutron/metadata_agent.ini" \ - op monitor interval="30s" timeout="30s" diff --git a/doc/high-availability-guide/includes/pacemaker-network.crm b/doc/high-availability-guide/includes/pacemaker-network.crm deleted file mode 100644 index 353a8983..00000000 --- a/doc/high-availability-guide/includes/pacemaker-network.crm +++ /dev/null @@ -1,2 +0,0 @@ -group g_services_network p_neutron-l3-agent p_neutron-dhcp-agent \ - p_neutron-metadata_agent diff --git a/doc/high-availability-guide/includes/pacemaker-neutron_server.crm b/doc/high-availability-guide/includes/pacemaker-neutron_server.crm deleted file mode 100644 index 300bce79..00000000 --- a/doc/high-availability-guide/includes/pacemaker-neutron_server.crm +++ /dev/null @@ -1,4 +0,0 @@ -primitive p_neutron-server ocf:openstack:neutron-server \ - params os_password="secretsecret" os_username="admin" os_tenant_name="admin" \ - keystone_get_token_url="http://192.168.42.103:5000/v2.0/tokens" \ - op monitor interval="30s" timeout="30s" diff --git a/doc/high-availability-guide/includes/pacemaker-properties.crm b/doc/high-availability-guide/includes/pacemaker-properties.crm deleted file mode 100644 index a31efef3..00000000 --- a/doc/high-availability-guide/includes/pacemaker-properties.crm +++ /dev/null @@ -1,5 +0,0 @@ -property no-quorum-policy="ignore" \ # <1> - pe-warn-series-max="1000" \ # <2> - pe-input-series-max="1000" \ - pe-error-series-max="1000" \ - cluster-recheck-interval="5min" # <3> diff --git a/doc/high-availability-guide/includes/pacemaker-rabbitmq.crm b/doc/high-availability-guide/includes/pacemaker-rabbitmq.crm deleted file mode 100644 index 153da483..00000000 --- a/doc/high-availability-guide/includes/pacemaker-rabbitmq.crm +++ /dev/null @@ -1,27 +0,0 @@ -primitive p_ip_rabbitmq ocf:heartbeat:IPaddr2 \ - params ip="192.168.42.100" cidr_netmask="24" \ - op monitor interval="10s" -primitive p_drbd_rabbitmq ocf:linbit:drbd \ - params drbd_resource="rabbitmq" \ - op start timeout="90s" \ - op stop timeout="180s" \ - op promote timeout="180s" \ - op demote timeout="180s" \ - op monitor interval="30s" role="Slave" \ - op monitor interval="29s" role="Master" -primitive p_fs_rabbitmq ocf:heartbeat:Filesystem \ - params device="/dev/drbd/by-res/rabbitmq" \ - directory="/var/lib/rabbitmq" \ - fstype="xfs" options="relatime" \ - op start timeout="60s" \ - op stop timeout="180s" \ - op monitor interval="60s" timeout="60s" -primitive p_rabbitmq ocf:rabbitmq:rabbitmq-server \ - params nodename="rabbit@localhost" \ - mnesia_base="/var/lib/rabbitmq" \ - op monitor interval="20s" timeout="10s" -group g_rabbitmq p_ip_rabbitmq p_fs_rabbitmq p_rabbitmq -ms ms_drbd_rabbitmq p_drbd_rabbitmq \ - meta notify="true" master-max="1" clone-max="2" -colocation c_rabbitmq_on_drbd inf: g_rabbitmq ms_drbd_rabbitmq:Master -order o_drbd_before_rabbitmq inf: ms_drbd_rabbitmq:promote g_rabbitmq:start diff --git 
a/doc/high-availability-guide/includes/rabbitmq.res b/doc/high-availability-guide/includes/rabbitmq.res deleted file mode 100644 index 5caf161b..00000000 --- a/doc/high-availability-guide/includes/rabbitmq.res +++ /dev/null @@ -1,11 +0,0 @@ -resource rabbitmq { - device minor 1; - disk "/dev/data/rabbitmq"; - meta-disk internal; - on node1 { - address ipv4 10.0.42.100:7701; - } - on node2 { - address ipv4 10.0.42.254:7701; - } -} diff --git a/doc/high-availability-guide/locale/high-availability-guide.pot b/doc/high-availability-guide/locale/high-availability-guide.pot deleted file mode 100644 index fa7960e4..00000000 --- a/doc/high-availability-guide/locale/high-availability-guide.pot +++ /dev/null @@ -1,2008 +0,0 @@ -msgid "" -msgstr "" -"Project-Id-Version: PACKAGE VERSION\n" -"POT-Creation-Date: 2015-08-24 06:00+0000\n" -"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" -"Last-Translator: FULL NAME \n" -"Language-Team: LANGUAGE \n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#: ./doc/high-availability-guide/ch_ha_aa_controllers.xml:7(title) -msgid "OpenStack controller nodes" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_controllers.xml:8(para) -msgid "OpenStack controller nodes contain:" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_controllers.xml:11(para) -msgid "All OpenStack API services" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_controllers.xml:14(para) -msgid "All OpenStack schedulers" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_controllers.xml:17(para) -msgid "Memcached service" -msgstr "" - -#: ./doc/high-availability-guide/ch_controller.xml:8(title) -msgid "Cloud controller cluster stack" -msgstr "" - -#: ./doc/high-availability-guide/ch_controller.xml:10(para) -msgid "The cloud controller runs on the management network and must talk to all other services." -msgstr "" - -#: ./doc/high-availability-guide/part_active_passive.xml:7(title) -msgid "HA using active/passive" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_haproxy.xml:8(title) -msgid "HAProxy nodes" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_haproxy.xml:10(para) -msgid "HAProxy is a very fast and reliable solution offering high availability, load balancing, and proxying for TCP and HTTP-based applications. It is particularly suited for web sites crawling under very high loads while needing persistence or Layer 7 processing. Supporting tens of thousands of connections is clearly realistic with today’s hardware." -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_haproxy.xml:14(para) -msgid "For installing HAProxy on your nodes, you should consider its official documentation. And also, you have to consider that HAProxy should not be a single point of failure so you need to ensure its availability by other means, such as Pacemaker or Keepalived. It is advisable to have multiple HAProxy instances running, where the number of these instances is a small odd number like 3 or 5. Also it is a common practice to collocate HAProxy instances with existing OpenStack controllers." -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_haproxy.xml:25(para) -msgid "Here is an example for the HAProxy configuration file:" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_haproxy.xml:178(para) -msgid "After each change of this file, you should restart HAProxy." 
-msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_haproxy.xml:180(para) -msgid "Note that the Galera cluster configuration commands indicate two of the three controllers are backup. That should be done to ensure only one node serves write requests because OpenStack support for multi-node writes is not production-ready yet." -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:7(title) -msgid "OpenStack High Availability Guide" -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:15(orgname) ./doc/high-availability-guide/bk-ha-guide.xml:22(holder) -msgid "OpenStack Contributors" -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:19(year) -msgid "2012" -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:20(year) -msgid "2013" -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:21(year) -msgid "2014" -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:24(releaseinfo) -msgid "current" -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:25(productname) -msgid "OpenStack" -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:29(remark) -msgid "Copyright details are filled in by the template." -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:33(para) -msgid "This guide describes how to install, configure, and manage OpenStack for high availability." -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:38(date) -msgid "2015-04-30" -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:42(para) -msgid "This guide has various updates for the Kilo release, such as adding MariaDB, updates to the MySQL information, corosync and networking updates." -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:51(date) -msgid "2014-10-17" -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:55(para) -msgid "This guide has gone through editorial changes to follow the OpenStack documentation conventions. Various smaller issues have been fixed." -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:64(date) -msgid "2014-05-16" -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:68(para) -msgid "Conversion to DocBook." -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:74(date) -msgid "2014-04-17" -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:78(para) -msgid "Minor cleanup of typos, otherwise no major revisions for Icehouse release." -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:85(date) -msgid "2012-01-16" -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:89(para) -msgid "Organizes guide based on cloud controller and compute nodes." -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:95(date) -msgid "2012-05-24" -msgstr "" - -#: ./doc/high-availability-guide/bk-ha-guide.xml:99(para) -msgid "Begin trunk designation." -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_db.xml:7(title) -msgid "Database" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_db.xml:8(para) -msgid "The first step is installing the database that sits at the heart of the cluster. When we talk about high availability (HA), we talk about several databases (for redundancy) and a means to keep them synchronized. In this case, we choose the MySQL database, along with Galera plug-in for synchronous multi-master replication." -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_db.xml:14(para) -msgid "The Galera Cluster plug-in is a multi-master Cluster based on synchronous replication. 
It is a high availability solution, which provides high system uptime, no data loss, and scalability for growth." -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_db.xml:19(para) -msgid "The choice of database is not a foregone conclusion; you are not required to use MySQL. It is, however, a fairly common choice in OpenStack installations, so we will cover it here." -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_db.xml:25(para) -msgid "MySQL with Galera is by no means the only way to achieve database HA. MariaDB Galera Cluster (https://mariadb.org/) and Percona XtraDB Cluster (http://www.percona.com/) also work with Galera. You also have the option to use PostgreSQL, which has its own replication, or another database HA option." -msgstr "" - -#: ./doc/high-availability-guide/ch_api.xml:8(title) -msgid "API node cluster stack" -msgstr "" - -#: ./doc/high-availability-guide/ch_api.xml:10(para) -msgid "The API node exposes OpenStack API endpoints onto external network (Internet). It must talk to the cloud controller on the management network." -msgstr "" - -#: ./doc/high-availability-guide/ch_pacemaker.xml:7(title) -msgid "The Pacemaker cluster stack" -msgstr "" - -#: ./doc/high-availability-guide/ch_pacemaker.xml:8(para) -msgid "OpenStack infrastructure high availability relies on the Pacemaker cluster stack, the state-of-the-art high availability and load balancing stack for the Linux platform. Pacemaker is storage and application-agnostic, and is in no way specific to OpenStack." -msgstr "" - -#: ./doc/high-availability-guide/ch_pacemaker.xml:13(para) -msgid "Pacemaker relies on the Corosync messaging layer for reliable cluster communications. Corosync implements the Totem single-ring ordering and membership protocol. It also provides UDP and InfiniBand based messaging, quorum, and cluster membership to Pacemaker." -msgstr "" - -#: ./doc/high-availability-guide/ch_pacemaker.xml:19(para) -msgid "Pacemaker interacts with applications through resource agents (RAs), of which it supports over 70 natively. Pacemaker can also easily use third-party RAs. An OpenStack high-availability configuration uses existing native Pacemaker RAs (such as those managing MySQL databases or virtual IP addresses), existing third-party RAs (such as for RabbitMQ), and native OpenStack RAs (such as those managing the OpenStack Identity and Image services)." -msgstr "" - -#: ./doc/high-availability-guide/part_active_active.xml:7(title) -msgid "HA using active/active" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_rabbitmq.xml:7(title) -msgid "RabbitMQ" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_rabbitmq.xml:8(para) -msgid "RabbitMQ is the default AMQP server used by many OpenStack services. 
Making the RabbitMQ service highly available involves the following steps:" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_rabbitmq.xml:13(para) ./doc/high-availability-guide/ha_aa_rabbitmq/section_install_rabbitmq.xml:7(title) -msgid "Install RabbitMQ" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_rabbitmq.xml:16(para) -msgid "Configure RabbitMQ for HA queues" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_rabbitmq.xml:19(para) -msgid "Configure OpenStack services to use Rabbit HA queues" -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:7(title) -msgid "Introduction to OpenStack High Availability" -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:8(para) -msgid "High Availability systems seek to minimize two things:" -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:11(term) -msgid "System downtime" -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:12(para) -msgid "Occurs when a user-facing service is unavailable beyond a specified maximum amount of time." -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:16(term) -msgid "Data loss" -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:17(para) -msgid "Accidental deletion or destruction of data." -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:21(para) -msgid "Most high availability systems guarantee protection against system downtime and data loss only in the event of a single failure. However, they are also expected to protect against cascading failures, where a single failure deteriorates into a series of consequential failures." -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:25(para) -msgid "A crucial aspect of high availability is the elimination of single points of failure (SPOFs). A SPOF is an individual piece of equipment or software which will cause system downtime or data loss if it fails. In order to eliminate SPOFs, check that mechanisms exist for redundancy of:" -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:32(para) -msgid "Network components, such as switches and routers" -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:35(para) -msgid "Applications and automatic service migration" -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:38(para) -msgid "Storage components" -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:41(para) -msgid "Facility services such as power, air conditioning, and fire protection" -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:45(para) -msgid "In the event that a component fails and a back-up system must take on its load, most high availability systems will replace the failed component as quickly as possible to maintain necessary redundancy. This way time spent in a degraded protection state is minimized." -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:49(para) -msgid "Most high availability systems will fail in the event of multiple independent (non-consequential) failures. In this case, most systems will protect data over maintaining availability." -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:52(para) -msgid "High-availability systems typically achieve an uptime percentage of 99.99% or more, which roughly equates to less than an hour of cumulative downtime per year. In order to achieve this, high availability systems should keep recovery times after a failure to about one to two minutes, sometimes significantly less." 
-msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:57(para) -msgid "OpenStack currently meets such availability requirements for its own infrastructure services, meaning that an uptime of 99.99% is feasible for the OpenStack infrastructure proper. However, OpenStack does not guarantee 99.99% availability for individual guest instances." -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:61(para) -msgid "Preventing single points of failure can depend on whether or not a service is stateless." -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:64(title) -msgid "Stateless vs. Stateful services" -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:65(para) -msgid "A stateless service is one that provides a response after your request, and then requires no further attention. To make a stateless service highly available, you need to provide redundant instances and load balance them. OpenStack services that are stateless include nova-api, nova-conductor, glance-api, keystone-api, neutron-api and nova-scheduler." -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:75(para) -msgid "A stateful service is one where subsequent requests to the service depend on the results of the first request. Stateful services are more difficult to manage because a single action typically involves more than one request, so simply providing additional instances and load balancing will not solve the problem. For example, if the Horizon user interface reset itself every time you went to a new page, it wouldn't be very useful. OpenStack services that are stateful include the OpenStack database and message queue." -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:83(para) -msgid "Making stateful services highly available can depend on whether you choose an active/passive or active/active configuration." -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:87(title) -msgid "Active/Passive" -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:88(para) -msgid "In an active/passive configuration, systems are set up to bring additional resources online to replace those that have failed. For example, OpenStack would write to the main database while maintaining a disaster recovery database that can be brought online in the event that the main database fails." -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:93(para) -msgid "Typically, an active/passive installation for a stateless service would maintain a redundant instance that can be brought online when required. Requests may be handled using a virtual IP address to facilitate return to service with minimal reconfiguration required." -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:98(para) -msgid "A typical active/passive installation for a stateful service maintains a replacement resource that can be brought online when required. A separate application (such as Pacemaker or Corosync) monitors these services, bringing the backup online as necessary." -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:104(title) -msgid "Active/Active" -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:105(para) -msgid "In an active/active configuration, systems also use a backup but will manage both the main and redundant systems concurrently. This way, if there is a failure the user is unlikely to notice. The backup system is already online, and takes on increased load while the main system is fixed and brought back online." 
-msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:110(para) -msgid "Typically, an active/active installation for a stateless service would maintain a redundant instance, and requests are load balanced using a virtual IP address and a load balancer such as HAProxy." -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:113(para) -msgid "A typical active/active installation for a stateful service would include redundant services with all instances having an identical state. For example, updates to one instance of a database would also update all other instances. This way a request to one instance is the same as a request to any other. A load balancer manages the traffic to these systems, ensuring that operational systems always handle the request." -msgstr "" - -#: ./doc/high-availability-guide/ch_intro.xml:120(para) -msgid "These are some of the more common ways to implement these high availability architectures, but they are by no means the only ways to do it. The important thing is to make sure that your services are redundant, and available; how you achieve that is up to you. This document will cover some of the more common options for highly available systems." -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_network.xml:7(title) -msgid "OpenStack network nodes" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_network.xml:8(para) -msgid "OpenStack network nodes contain:" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_network.xml:11(para) -msgid "Neutron DHCP agent" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_network.xml:14(para) -msgid "Neutron L2 agent" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_network.xml:17(para) -msgid "Neutron L3 agent" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_network.xml:20(para) -msgid "Neutron metadata agent" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_network.xml:23(para) -msgid "Neutron LBaaS agent" -msgstr "" - -#: ./doc/high-availability-guide/ch_ha_aa_network.xml:27(para) -msgid "The neutron L2 agent does not need to be highly available. It has to be installed on each data forwarding node and controls the virtual networking drivers as Open vSwitch or Linux Bridge. One L2 agent runs per node and controls its virtual interfaces. That's why it cannot be distributed and highly available." -msgstr "" - -#: ./doc/high-availability-guide/ch_network.xml:8(title) -msgid "Network controller cluster stack" -msgstr "" - -#: ./doc/high-availability-guide/ch_network.xml:10(para) -msgid "The network controller sits on the management and data network, and needs to be connected to the Internet if an instance will need access to the Internet." -msgstr "" - -#: ./doc/high-availability-guide/ch_network.xml:12(para) -msgid "Pacemaker requires that both nodes have different hostnames. Because of that, RA scripts could require some adjustments since the Networking scheduler will be aware of one node, for example a virtual router attached to a single L3 node. For example, both nodes could set different hostnames in the configuration files, and when the l3-agent started by Pacemaker, the node's hostname will be changed to network-controller automatically. Whichever node starts the l3-agent will have the same hostname." 
-msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml:7(title) -msgid "Configure OpenStack services to use RabbitMQ" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml:8(para) -msgid "We have to configure the OpenStack components to use at least two RabbitMQ nodes." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml:10(para) -msgid "Do this configuration on all services using RabbitMQ:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml:13(para) -msgid "RabbitMQ HA cluster host:port pairs:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml:17(para) -msgid "How frequently to retry connecting with RabbitMQ:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml:21(para) -msgid "How long to back-off for between retries when connecting to RabbitMQ:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml:26(para) -msgid "Maximum retries with trying to connect to RabbitMQ (infinite by default):" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml:31(para) -msgid "Use durable queues in RabbitMQ:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml:35(para) -msgid "Use HA queues in RabbitMQ (x-ha-policy: all):" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml:41(para) -msgid "If you change the configuration from an old setup which did not use HA queues, you should interrupt the service:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml:48(para) -msgid "Services currently working with HA queues:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml:51(para) -msgid "OpenStack Compute" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml:54(para) -msgid "OpenStack Block Storage" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml:57(para) -msgid "OpenStack Networking" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_openstack_services_to_use_rabbitmq.xml:60(para) -msgid "Telemetry" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:7(title) -msgid "Configure RabbitMQ" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:8(para) -msgid "We are building a cluster of RabbitMQ nodes to construct a RabbitMQ broker, a logical grouping of several Erlang nodes." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:10(para) -msgid "We have to consider that while exchanges and bindings will survive the loss of individual nodes, queues and their messages will not because a queue and its contents is located on one node. If we lose this node, we also lose the queue." 
-msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:14(para) -msgid "Mirrored queues in RabbitMQ improve the availability of service since it will be resilient to failures." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:16(para) -msgid "We consider that we run (at least) two RabbitMQ servers and we call the nodes rabbit1 and rabbit2. To build a broker, we need to ensure that all nodes have the same Erlang cookie file." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:20(para) -msgid "To do so, stop RabbitMQ everywhere and copy the cookie from the first node to the other node(s):" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:23(replaceable) ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:30(replaceable) ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:31(replaceable) ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:32(replaceable) ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:37(replaceable) ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:41(replaceable) ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:45(replaceable) ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:46(replaceable) ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:46(replaceable) -msgid "NODE" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:24(para) -msgid "On the target nodes ensure the correct owner, group, and permissions of the .erlang.cookie file:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:28(para) -msgid "Start RabbitMQ on all nodes and verify the nodes are running:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:35(para) -msgid "Run the following commands on all nodes except the first one:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:43(para) -msgid "To verify the cluster status:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:47(para) -msgid "If the cluster is working, you can now proceed to creating users and passwords for queues." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:49(para) -msgid "To ensure that all queues, except those with auto-generated names, are mirrored across all running nodes it is necessary to set the policy key ha-mode to all. Run the following command on one of the nodes:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_configure_rabbitmq.xml:55(para) -msgid "More information about highly available queues and clustering can be found in the official RabbitMQ documentation." 
-msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_install_rabbitmq.xml:9(title) -msgid "On Ubuntu and Debian" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_install_rabbitmq.xml:10(para) ./doc/high-availability-guide/ha_aa_rabbitmq/section_install_rabbitmq.xml:21(para) -msgid "RabbitMQ is packaged on both distros:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_install_rabbitmq.xml:14(link) -msgid "Official manual for installing RabbitMQ on Ubuntu and Debian" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_install_rabbitmq.xml:20(title) -msgid "On Fedora and RHEL" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_install_rabbitmq.xml:25(link) -msgid "Official manual for installing RabbitMQ on Fedora and RHEL" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_install_rabbitmq.xml:31(title) -msgid "On openSUSE and SLES" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_install_rabbitmq.xml:33(title) -msgid "On openSUSE:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_install_rabbitmq.xml:40(link) ./doc/high-availability-guide/ha_aa_rabbitmq/section_install_rabbitmq.xml:45(link) -msgid "Official manual for installing RabbitMQ on openSUSE" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_install_rabbitmq.xml:49(title) -msgid "On SLES 12:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_rabbitmq/section_install_rabbitmq.xml:53(para) -msgid "The packages are signed by GPG key 893A90DAD85F9316. You should verify the fingerprint of the imported GPG key before using it." -msgstr "" - -#: ./doc/high-availability-guide/api/section_api_vip.xml:7(title) -msgid "Configure the VIP" -msgstr "" - -#: ./doc/high-availability-guide/api/section_api_vip.xml:8(para) -msgid "First, you must select and assign a virtual IP address (VIP) that can freely float between cluster nodes." -msgstr "" - -#: ./doc/high-availability-guide/api/section_api_vip.xml:10(para) -msgid "This configuration creates p_ip_api, a virtual IP address for use by the API node (192.168.42.103):" -msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:7(title) -msgid "Highly available OpenStack Networking server" -msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:8(para) -msgid "OpenStack Networking is the network connectivity service in OpenStack. Making the OpenStack Networking Server service highly available in active / passive mode involves the following tasks:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:13(para) -msgid "Configure OpenStack Networking to listen on the virtual IP address," -msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:17(para) -msgid "Manage the OpenStack Networking API Server daemon with the Pacemaker cluster manager," -msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:21(para) -msgid "Configure OpenStack services to use the virtual IP address." -msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:25(para) -msgid "Here is the documentation for installing OpenStack Networking service." 
-msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:29(title) -msgid "Add OpenStack Networking Server resource to Pacemaker" -msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:30(para) ./doc/high-availability-guide/api/section_cinder_api.xml:29(para) ./doc/high-availability-guide/api/section_glance_api.xml:29(para) ./doc/high-availability-guide/api/section_ceilometer_agent_central.xml:26(para) ./doc/high-availability-guide/api/section_keystone.xml:29(para) ./doc/high-availability-guide/network/section_highly_available_neutron_l3_agent.xml:20(para) ./doc/high-availability-guide/network/section_highly_available_neutron_metadata_agent.xml:15(para) ./doc/high-availability-guide/network/section_highly_available_neutron_dhcp_agent.xml:20(para) -msgid "First of all, you need to download the resource agent to your system:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:35(para) -msgid "You can now add the Pacemaker configuration for OpenStack Networking Server resource. Connect to the Pacemaker cluster with crm configure, and add the following cluster resources:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:43(para) -msgid "This configuration creates p_neutron-server, a resource for manage OpenStack Networking Server service" -msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:45(para) -msgid "crm configure supports batch input, so you may copy and paste the above into your live pacemaker configuration, and then make changes as required. For example, you may enter edit p_neutron-server from the crm configure menu and edit the resource to match your preferred virtual IP address." -msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:51(para) -msgid "Once completed, commit your configuration changes by entering commit from the crm configure menu. Pacemaker will then start the OpenStack Networking API service, and its dependent resources, on one of your nodes." -msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:57(title) -msgid "Configure OpenStack Networking server" -msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:58(para) -msgid "Edit /etc/neutron/neutron.conf:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:79(title) -msgid "Configure OpenStack services to use highly available OpenStack Networking server" -msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:81(para) -msgid "Your OpenStack services must now point their OpenStack Networking Server configuration to the highly available, virtual cluster IP address — rather than an OpenStack Networking server’s physical IP address as you normally would." -msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:85(para) -msgid "For example, you should configure OpenStack Compute for using highly available OpenStack Networking server in editing nova.conf file:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:89(para) -msgid "You need to create the OpenStack Networking server endpoint with this IP." 
-msgstr "" - -#: ./doc/high-availability-guide/api/section_neutron_server.xml:92(para) ./doc/high-availability-guide/api/section_glance_api.xml:102(para) ./doc/high-availability-guide/api/section_keystone.xml:91(para) -msgid "If you are using both private and public IP addresses, you should create two Virtual IP addresses and define your endpoint like this:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:7(title) -msgid "Highly available Block Storage API" -msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:8(para) -msgid "Making the Block Storage (cinder) API service highly available in active / passive mode involves:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:12(para) -msgid "Configuring Block Storage to listen on the VIP address" -msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:15(para) -msgid "Managing Block Storage API daemon with the Pacemaker cluster manager" -msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:19(para) -msgid "Configuring OpenStack services to use this IP address" -msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:23(para) -msgid "Here is the documentation for installing Block Storage service." -msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:28(title) -msgid "Add Block Storage API resource to Pacemaker" -msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:34(para) -msgid "You can now add the Pacemaker configuration for Block Storage API resource. Connect to the Pacemaker cluster with crm configure, and add the following cluster resources:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:41(para) ./doc/high-availability-guide/api/section_glance_api.xml:41(para) ./doc/high-availability-guide/api/section_ceilometer_agent_central.xml:38(para) ./doc/high-availability-guide/controller/section_rabbitmq.xml:165(para) ./doc/high-availability-guide/controller/section_mysql.xml:182(para) ./doc/high-availability-guide/network/section_highly_available_neutron_l3_agent.xml:31(para) ./doc/high-availability-guide/network/section_highly_available_neutron_metadata_agent.xml:26(para) -msgid "This configuration creates" -msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:44(para) -msgid "p_cinder-api, a resource for manage Block Storage API service" -msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:48(para) -msgid "crm configure supports batch input, so you may copy and paste the above into your live pacemaker configuration, and then make changes as required. For example, you may enter edit p_ip_cinder-api from the crm configure menu and edit the resource to match your preferred virtual IP address." -msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:53(para) -msgid "Once completed, commit your configuration changes by entering commit from the crm configure menu. Pacemaker will then start the Block Storage API service, and its dependent resources, on one of your nodes." 
-msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:59(title) -msgid "Configure Block Storage API service" -msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:60(para) -msgid "Edit /etc/cinder/cinder.conf:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:77(title) -msgid "Configure OpenStack services to use highly available Block Storage API" -msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:79(para) -msgid "Your OpenStack services must now point their Block Storage API configuration to the highly available, virtual cluster IP address — rather than a Block Storage API server’s physical IP address as you normally would." -msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:83(para) -msgid "You must create the Block Storage API endpoint with this IP." -msgstr "" - -#: ./doc/high-availability-guide/api/section_cinder_api.xml:85(para) -msgid "If you are using both private and public IP, you should create two Virtual IPs and define your endpoint like this:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:7(title) -msgid "Highly available OpenStack Image API" -msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:8(para) -msgid "The OpenStack Image service offers a service for discovering, registering, and retrieving virtual machine images. To make the OpenStack Image API service highly available in active / passive mode, you must:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:13(para) -msgid "Configure the OpenStack Image service to listen on the VIP address." -msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:16(para) -msgid "Manage the OpenStack Image API daemon with the Pacemaker cluster manager." -msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:20(para) ./doc/high-availability-guide/api/section_keystone.xml:20(para) -msgid "Configure OpenStack services to use this IP address." -msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:24(para) -msgid "Here is the documentation for installing the OpenStack Image API service." -msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:28(title) -msgid "Add OpenStack Image API resource to Pacemaker" -msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:34(para) -msgid "You can now add the Pacemaker configuration for the OpenStack Image API resource. Connect to the Pacemaker cluster with crm configure, and add the following cluster resources:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:44(para) -msgid "p_glance-api, a resource for managing OpenStack Image API service" -msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:48(para) -msgid "crm configure supports batch input, so you may copy and paste the above into your live Pacemaker configuration, and then make changes as required. For example, you may enter edit p_ip_glance-api from the crm configure menu and edit the resource to match your preferred virtual IP address." -msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:53(para) -msgid "Once completed, commit your configuration changes by entering commit from the crm configure menu. Pacemaker will then start the OpenStack Image API service, and its dependent resources, on one of your nodes." 
-msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:59(title) -msgid "Configure OpenStack Image service API" -msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:60(para) -msgid "Edit /etc/glance/glance-api.conf:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:80(title) -msgid "Configure OpenStack services to use high available OpenStack Image API" -msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:82(para) -msgid "Your OpenStack services must now point their OpenStack Image API configuration to the highly available, virtual cluster IP address — rather than an OpenStack Image API server’s physical IP address as you normally would." -msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:86(para) -msgid "For OpenStack Compute, for example, if your OpenStack Image API service IP address is 192.168.42.103 as in the configuration explained here, you would use the following configuration in your nova.conf file:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:95(para) -msgid "In versions prior to Juno, this option was called glance_api_servers in the [DEFAULT] section." -msgstr "" - -#: ./doc/high-availability-guide/api/section_glance_api.xml:99(para) -msgid "You must also create the OpenStack Image API endpoint with this IP." -msgstr "" - -#: ./doc/high-availability-guide/api/section_api_pacemaker.xml:7(title) -msgid "Configure Pacemaker group" -msgstr "" - -#: ./doc/high-availability-guide/api/section_api_pacemaker.xml:8(para) -msgid "Finally, we need to create a service group to ensure that the virtual IP is linked to the API services resources:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_ceilometer_agent_central.xml:7(title) -msgid "Highly available Telemetry central agent" -msgstr "" - -#: ./doc/high-availability-guide/api/section_ceilometer_agent_central.xml:8(para) -msgid "Telemetry (ceilometer) is the metering and monitoring service in OpenStack. The Central agent polls for resource utilization statistics for resources not tied to instances or compute nodes." -msgstr "" - -#: ./doc/high-availability-guide/api/section_ceilometer_agent_central.xml:12(para) -msgid "Due to limitations of a polling model, a single instance of this agent can be polling a given list of meters, unless workload partitioning has been configured for multiple central agents. In this setup, we install this service on the API nodes also in the active / passive mode." -msgstr "" - -#: ./doc/high-availability-guide/api/section_ceilometer_agent_central.xml:17(para) -msgid "Making the Telemetry central agent service highly available in active / passive mode involves managing its daemon with the Pacemaker cluster manager." -msgstr "" - -#: ./doc/high-availability-guide/api/section_ceilometer_agent_central.xml:21(para) -msgid "You will find at this page the process to install the Telemetry central agent." -msgstr "" - -#: ./doc/high-availability-guide/api/section_ceilometer_agent_central.xml:25(title) -msgid "Add the Telemetry central agent resource to Pacemaker" -msgstr "" - -#: ./doc/high-availability-guide/api/section_ceilometer_agent_central.xml:30(para) -msgid "You may then proceed with adding the Pacemaker configuration for the Telemetry central agent resource. 
Connect to the Pacemaker cluster with crm configure, and add the following cluster resources:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_ceilometer_agent_central.xml:41(para) -msgid "p_ceilometer-agent-central, a resource for managing the Ceilometer Central Agent service" -msgstr "" - -#: ./doc/high-availability-guide/api/section_ceilometer_agent_central.xml:45(para) ./doc/high-availability-guide/network/section_highly_available_neutron_l3_agent.xml:38(para) ./doc/high-availability-guide/network/section_highly_available_neutron_dhcp_agent.xml:38(para) -msgid "crm configure supports batch input, so you may copy and paste the above into your live pacemaker configuration, and then make changes as required." -msgstr "" - -#: ./doc/high-availability-guide/api/section_ceilometer_agent_central.xml:48(para) -msgid "Once completed, commit your configuration changes by entering commit from the crm configure menu. Pacemaker will then start the Ceilometer Central Agent service, and its dependent resources, on one of your nodes." -msgstr "" - -#: ./doc/high-availability-guide/api/section_ceilometer_agent_central.xml:54(title) -msgid "Configure Telemetry central agent service" -msgstr "" - -#: ./doc/high-availability-guide/api/section_ceilometer_agent_central.xml:55(para) -msgid "Edit /etc/ceilometer/ceilometer.conf:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:7(title) -msgid "Highly available OpenStack Identity" -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:8(para) -msgid "OpenStack Identity is the Identity Service in OpenStack and used by many services. Making the OpenStack Identity service highly available in active / passive mode involves" -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:13(para) -msgid "Configure OpenStack Identity to listen on the VIP address," -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:16(para) -msgid "Managing OpenStack Identity daemon with the Pacemaker cluster manager," -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:24(para) -msgid "Here is the documentation for installing OpenStack Identity service." -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:28(title) -msgid "Add OpenStack Identity resource to Pacemaker" -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:36(para) -msgid "You can now add the Pacemaker configuration for OpenStack Identity resource. Connect to the Pacemaker cluster with crm configure, and add the following cluster resources:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:43(para) -msgid "This configuration creates p_keystone, a resource for managing the OpenStack Identity service." -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:45(para) -msgid "crm configure supports batch input, so you may copy and paste the above into your live pacemaker configuration, and then make changes as required. For example, you may enter edit p_ip_keystone from the crm configure menu and edit the resource to match your preferred virtual IP address." -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:50(para) -msgid "Once completed, commit your configuration changes by entering commit from the crm configure menu. Pacemaker will then start the OpenStack Identity service, and its dependent resources, on one of your nodes." 
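A sketch of the remaining API resources and the service group described in the "Configure Pacemaker group" section above; resource-agent names and parameters follow the openstack resource-agents conventions and are assumptions:

    primitive p_keystone ocf:openstack:keystone \
      params config="/etc/keystone/keystone.conf" \
      op monitor interval="30s" timeout="30s"
    primitive p_ceilometer-agent-central ocf:openstack:ceilometer-agent-central \
      params config="/etc/ceilometer/ceilometer.conf" \
      op monitor interval="30s" timeout="30s"

    # tie the VIP to the API resources (member list is illustrative)
    group g_services_api p_ip_api p_keystone p_glance-api \
      p_cinder-api p_neutron-server p_ceilometer-agent-central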
-msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:56(title) -msgid "Configure OpenStack Identity service" -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:57(para) -msgid "You need to edit your OpenStack Identity configuration file (keystone.conf) and change the bind parameters:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:60(para) -msgid "On Havana:" -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:62(para) -msgid "The admin_bind_host option lets you use a private network for the admin access." -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:66(para) -msgid "To be sure all data will be highly available, you should be sure that you store everything in the MySQL database (which is also highly available):" -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:77(title) -msgid "Configure OpenStack services to use the highly available OpenStack Identity" -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:79(para) -msgid "Your OpenStack services must now point their OpenStack Identity configuration to the highly available, virtual cluster IP address — rather than a OpenStack Identity server’s physical IP address as you normally would." -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:83(para) -msgid "For example with OpenStack Compute, if your OpenStack Identity service IP address is 192.168.42.103 as in the configuration explained here, you would use the following line in your API configuration file (api-paste.ini):" -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:88(para) -msgid "You also need to create the OpenStack Identity Endpoint with this IP." -msgstr "" - -#: ./doc/high-availability-guide/api/section_keystone.xml:98(para) -msgid "If you are using the horizon dashboard, you should edit the local_settings.py file:" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_install_packages.xml:7(title) -msgid "Install packages" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_install_packages.xml:8(para) -msgid "On any host that is meant to be part of a Pacemaker cluster, you must first establish cluster communications through the Corosync messaging layer. 
This involves installing the following packages (and their dependencies, which your package manager will normally install automatically):" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_install_packages.xml:15(para) -msgid "pacemaker (Note that the crm shell should be downloaded separately.)" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_install_packages.xml:20(package) -msgid "crmsh" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_install_packages.xml:25(package) -msgid "corosync" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_install_packages.xml:30(package) -msgid "cluster-glue" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_install_packages.xml:34(para) -msgid "fence-agents (Fedora only; all other distributions use fencing agents from cluster-glue)" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_install_packages.xml:40(package) -msgid "resource-agents" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_basic_cluster_properties.xml:7(title) -msgid "Set basic cluster properties" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_basic_cluster_properties.xml:8(para) -msgid "Once your Pacemaker cluster is set up, it is recommended to set a few basic cluster properties. To do so, start the shell and change into the configuration menu by entering configure. Alternatively, you may jump straight into the Pacemaker configuration menu by typing directly from a shell prompt." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_basic_cluster_properties.xml:14(para) -msgid "Then, set the following properties:" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_basic_cluster_properties.xml:22(para) -msgid "Setting is required in 2-node Pacemaker clusters for the following reason: if quorum enforcement is enabled, and one of the two nodes fails, then the remaining node cannot establish a majority of quorum votes necessary to run services, and thus it is unable to take over any resources. In this case, the appropriate workaround is to ignore loss of quorum in the cluster. This should only be done in 2-node clusters: do not set this property in Pacemaker clusters with more than two nodes. Note that a two-node cluster with this setting exposes a risk of split-brain because either half of the cluster, or both, are able to become active in the event that both nodes remain online but lose communication with one another. The preferred configuration is 3 or more nodes per cluster." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_basic_cluster_properties.xml:37(para) -msgid "Setting , and to 1000 instructs Pacemaker to keep a longer history of the inputs processed, and errors and warnings generated, by its Policy Engine. This history is typically useful in case cluster troubleshooting becomes necessary." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_basic_cluster_properties.xml:45(para) -msgid "Pacemaker uses an event-driven approach to cluster state processing. However, certain Pacemaker actions occur at a configurable interval, , which defaults to 15 minutes. It is usually prudent to reduce this to a shorter interval, such as 5 or 3 minutes." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_basic_cluster_properties.xml:52(para) -msgid "Once you have made these changes, you may commit the updated configuration."
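The properties discussed above are set from the crm configure prompt; a sketch matching the described values (no-quorum-policy only for 2-node clusters):

    property no-quorum-policy="ignore" \
      pe-warn-series-max="1000" \
      pe-input-series-max="1000" \
      pe-error-series-max="1000" \
      cluster-recheck-interval="5min"
    commit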
-msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_starting_corosync.xml:7(title) -msgid "Starting Corosync" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_starting_corosync.xml:8(para) -msgid "Corosync is started as a regular system service. Depending on your distribution, it may ship with an LSB init script, an upstart job, or a systemd unit file. Either way, the service is usually named corosync:" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_starting_corosync.xml:14(para) ./doc/high-availability-guide/pacemaker/section_start_pacemaker.xml:14(para) -msgid " (LSB)" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_starting_corosync.xml:19(para) ./doc/high-availability-guide/pacemaker/section_start_pacemaker.xml:19(para) -msgid " (LSB, alternate)" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_starting_corosync.xml:24(para) ./doc/high-availability-guide/pacemaker/section_start_pacemaker.xml:24(para) -msgid " (upstart)" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_starting_corosync.xml:29(para) ./doc/high-availability-guide/pacemaker/section_start_pacemaker.xml:29(para) -msgid " (systemd)" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_starting_corosync.xml:34(para) -msgid "You can now check the Corosync connectivity with two tools." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_starting_corosync.xml:35(para) -msgid "The utility, when invoked with the option, gives a summary of the health of the communication rings:" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_starting_corosync.xml:47(para) -msgid "The utility can be used to dump the Corosync cluster member list:" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_starting_corosync.xml:56(para) -msgid "You should see a status=joined entry for each of your constituent cluster nodes." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_starting_corosync.xml:59(para) -msgid "If you are using Corosync version 2, use the utility as it is a direct replacement for ." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_start_pacemaker.xml:7(title) -msgid "Start Pacemaker" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_start_pacemaker.xml:8(para) -msgid "Once the Corosync services have been started and you have established that the cluster is communicating properly, it is safe to start pacemakerd, the Pacemaker master control process:" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_start_pacemaker.xml:34(para) -msgid "Once the Pacemaker services have started, Pacemaker will create a default empty cluster configuration with no resources. You may observe Pacemaker's status with the utility:" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:7(title) -msgid "Set up Corosync" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:8(para) -msgid "Besides installing the Corosync package, you must also create a configuration file, stored in /etc/corosync/corosync.conf. Corosync can be configured to work with either multicast or unicast IP addresses." 
-msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:14(title) -msgid "Set up Corosync with multicast" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:15(para) -msgid "Most distributions ship an example configuration file (corosync.conf.example) as part of the documentation bundled with the Corosync package. An example Corosync configuration file is shown below:" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:19(title) -msgid "Corosync configuration file (corosync.conf)" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:91(para) -msgid "The value specifies the time, in milliseconds, during which the Corosync token is expected to be transmitted around the ring. When this timeout expires, the token is declared lost, and after lost tokens the non-responding processor (cluster node) is declared dead. In other words, × is the maximum time a node is allowed to not respond to cluster messages before being considered dead. The default for is 1000 (1 second), with 4 allowed retransmits. These defaults are intended to minimize failover times, but can cause frequent \"false alarms\" and unintended failovers in case of short network interruptions. The values used here are safer, albeit with slightly extended failover times." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:107(para) -msgid "With enabled, Corosync nodes mutually authenticate using a 128-byte shared secret stored in /etc/corosync/authkey, which may be generated with the utility. When using , cluster communications are also encrypted." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:115(para) -msgid "In Corosync configurations using redundant networking (with more than one ), you must select a Redundant Ring Protocol (RRP) mode other than none. active is the recommended RRP mode." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:121(para) -msgid "There are several things to note about the recommended interface configuration:" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:125(para) -msgid "The must differ between all configured interfaces, starting with 0." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:129(para) -msgid "The is the network address of the interfaces to bind to. The example uses two network addresses of /24 IPv4 subnets." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:134(para) -msgid "Multicast groups () must not be reused across cluster boundaries. In other words, no two distinct clusters should ever use the same multicast group. Be sure to select multicast addresses compliant with RFC 2365, \"Administratively Scoped IP Multicast\"." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:142(para) -msgid "For firewall configurations, note that Corosync communicates over UDP only, and uses mcastport (for receives) and mcastport - 1 (for sends)." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:149(para) -msgid "The service declaration for the pacemaker service may be placed in the corosync.conf file directly, or in its own separate file, /etc/corosync/service.d/pacemaker." 
-msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:155(para) -msgid "If you are using Corosync version 2 on Ubuntu 14.04, remove or comment out lines under the service stanza, which enables Pacemaker to start up." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:161(para) -msgid "Once created, the corosync.conf file (and the authkey file if the option is enabled) must be synchronized across all cluster nodes." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:166(title) -msgid "Set up Corosync with unicast" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:167(para) -msgid "Some environments may not support multicast. For such cases, Corosync should be configured for unicast. An example fragment of the Corosync configuration file is shown below:" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:171(title) -msgid "Corosync configuration file fragment (corosync.conf)" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:207(para) -msgid "If the is set to yes, the broadcast address is used for communication. If this option is set, should not be set." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:212(para) -msgid "The directive controls the transport mechanism used. To avoid the use of multicast entirely, a unicast transport parameter should be specified. This requires specifying the list of members in directive; this could potentially make up the membership before deployment. The default is . The transport type can also be set to or ." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:219(para) -msgid "Within the directive, it is possible to specify specific information about nodes in cluster. Directive can contain only the sub-directive, which specifies every node that should be a member of the membership, and where non-default options are needed. Every node must have at least the field filled." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:225(para) -msgid "For UDPU, every node that should be a member of the membership must be specified." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:228(para) -msgid "Possible options are:" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:229(para) -msgid "The specifies the IP address of one of the nodes. X is the ring number." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:231(para) -msgid "The configuration option is optional when using IPv4 and required when using IPv6. This is a 32-bit value specifying the node identifier delivered to the cluster membership service. When using IPv4, this defaults to the 32-bit IP address to which the system is bound with the ring identifier of 0. The node identifier value of zero is reserved and should not be used." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:242(title) -msgid "Set up Corosync with votequorum library for a full-size cluster" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:243(para) -msgid "This section describes a full-size cluster configuration with three or more members which is appropriate for production deployments. 
For a two-node configuration that can be used for demonstrations and testing, please go to the next section." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:255(para) -msgid "Query the quorum status" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:258(para) -msgid "Get a list of nodes known to the quorum service" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:261(para) -msgid "Receive notifications of quorum state changes" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:264(para) -msgid "Change the number of votes assigned to a node" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:267(para) -msgid "Change the number of expected votes for a cluster to be quorate" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:271(para) -msgid "Connect an additional quorum device to allow small clusters to remain quorate during node outages" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:248(para) -msgid "The votequorum library is part of the Corosync project. It provides an interface to the vote-based quorum service and it must be explicitly enabled in the Corosync configuration file. The main role of the votequorum library is to avoid split-brain situations, but it also provides a mechanism to: The votequorum library was created to replace and eliminate qdisk, the disk-based quorum daemon for CMAN, from advanced cluster configurations." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:279(title) -msgid "Votequorum service configuration within Corosync" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:291(para) -msgid "Provider enables the votequorum library; this is the only required option." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:294(para) -msgid "The cluster is fully operational with 7 nodes (each node has 1 vote), quorum: 4. If a list of nodes is specified, this setting is ignored." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:299(para) -msgid " When starting up a cluster (all nodes down), it holds the cluster quorum until all of the nodes have come online and joined the cluster for the first time (new in Corosync 2.0)." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:304(para) -msgid " enables the Last Man Standing (LMS) feature (disabled by default: 0). If a cluster is on the quorum edge (expected_votes: 7; online nodes: 4) for time longer than configured in , the cluster can recalculate quorum and continue operating even if the next node is lost. This logic is repeated until the number of online nodes in the cluster reaches 2. In order to allow the cluster to step down from 2 members to only 1, which is not recommended, a dedicated option needs to be set." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:314(para) -msgid " is the time required to recalculate the quorum after one or more hosts have been lost from the cluster. To perform the new quorum recalculation, the cluster needs to have quorum for at least , time in [ms] (default: 10000ms)."
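A quorum section sketch matching the full-size example discussed above (seven nodes, one vote each):

    quorum {
      provider: corosync_votequorum     # enables the votequorum library
      expected_votes: 7                 # ignored when a nodelist is supplied
      wait_for_all: 1                   # hold quorum until all nodes have joined once
      last_man_standing: 1              # allow recalculating quorum on the quorum edge
      last_man_standing_window: 10000   # ms of stable quorum required before recalculating
    }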
-msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:324(title) -msgid "Set up Corosync with votequorum library for two-host clusters" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:325(para) -msgid "The two-node cluster configuration is a special case that Pacemaker supports for demonstration and testing; it is a special feature of the votequorum library and is not recommended for production environments." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:330(title) -msgid "Multicast votequorum service configuration for two-host Corosync cluster" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:341(para) ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:384(para) -msgid " enables votequorum provider library." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:346(para) -msgid "Setting to 1 enables by default. It is still possible to override by explicitly setting it to 0. If more than 2 nodes join the cluster, the option is automatically disabled." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:353(para) -msgid "Disabling in a two-node cluster may be dangerous as it can lead to a situation (stonith deathmatch) where each node comes up, assumes the other is down, and fences peer in order to safely start clustered services. The peer eventually comes up and repeats the process until the underlying fault is rectified." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:344(para) -msgid "Put the cluser into two-node operational mode (default: 0). " -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:365(title) -msgid "Unicast (UDP) votequorum service configuration for two-host Corosync cluster" -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:387(para) -msgid "For the cluster to work with two members only artificially sets quorum below mathematical majority." -msgstr "" - -#: ./doc/high-availability-guide/pacemaker/section_set_up_corosync.xml:391(para) -msgid "Unicast Corosync configuratrion requires option to explicitly provide a list of cluster members." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:11(title) -msgid "Highly available RabbitMQ" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:12(para) -msgid "RabbitMQ is the default AMQP server used by many OpenStack services. Making the RabbitMQ service highly available involves:" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:16(para) -msgid "configuring a DRBD device for use by RabbitMQ," -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:19(para) -msgid "configuring RabbitMQ to use a data directory residing on that DRBD device," -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:23(para) -msgid "selecting and assigning a virtual IP address (VIP) that can freely float between cluster nodes," -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:27(para) -msgid "configuring RabbitMQ to listen on that IP address," -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:30(para) -msgid "managing all resources, including the RabbitMQ daemon itself, with the Pacemaker cluster manager." 
-msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:35(para) -msgid "Active-active mirrored queues is another method for configuring RabbitMQ versions 3.3.0 and later for high availability. You can also manage a RabbitMQ cluster with active-active mirrored queues using the Pacemaker cluster manager." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:41(title) ./doc/high-availability-guide/controller/section_mysql.xml:44(title) -msgid "Configure DRBD" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:42(para) -msgid "The Pacemaker based RabbitMQ server requires a DRBD resource from which it mounts the /var/lib/rabbitmq directory. In this example, the DRBD resource is simply named rabbitmq:" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:47(title) -msgid "rabbitmq DRBD resource configuration (/etc/drbd.d/rabbitmq.res)" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:63(para) -msgid "This resource uses an underlying local disk (in DRBD terminology, a backing device) named /dev/data/rabbitmq on both cluster nodes, node1 and node2. Normally, this would be an LVM Logical Volume specifically set aside for this purpose. The DRBD meta-disk is internal, meaning DRBD-specific metadata is being stored at the end of the disk device itself. The device is configured to communicate between IPv4 addresses 10.0.42.100 and 10.0.42.254, using TCP port 7701. Once enabled, it will map to a local DRBD block device with the device minor number 1, that is, /dev/drbd1." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:74(para) -msgid "Enabling a DRBD resource is explained in detail in the DRBD User's Guide. In brief, the proper sequence of commands is this:" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:82(para) -msgid "Initializes DRBD metadata and writes the initial set of metadata to /dev/data/rabbitmq. Must be completed on both nodes." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:87(para) -msgid "Creates the /dev/drbd1 device node, attaches the DRBD device to its backing store, and connects the DRBD node to its peer. Must be completed on both nodes." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:92(para) ./doc/high-availability-guide/controller/section_mysql.xml:95(para) -msgid "Kicks off the initial device synchronization, and puts the device into the primary (readable and writable) role. See Resource roles (from the DRBD User's Guide) for a more detailed description of the primary and secondary roles in DRBD. Must be completed on one node only, namely the one where you are about to continue with creating your filesystem." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:103(title) -msgid "Create a file system" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:104(para) -msgid "Once the DRBD resource is running and in the primary role (and potentially still in the process of running the initial device synchronization), you may proceed with creating the filesystem for RabbitMQ data. 
XFS is generally the recommended filesystem:" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:109(para) ./doc/high-availability-guide/controller/section_mysql.xml:113(para) -msgid "You may also use the alternate device path for the DRBD device, which may be easier to remember as it includes the self-explanatory resource name:" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:113(para) ./doc/high-availability-guide/controller/section_mysql.xml:117(para) -msgid "Once completed, you may safely return the device to the secondary role. Any ongoing device synchronization will continue in the background:" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:119(title) -msgid "Prepare RabbitMQ for Pacemaker high availability" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:120(para) -msgid "In order for Pacemaker monitoring to function properly, you must ensure that RabbitMQ's .erlang.cookie files are identical on all nodes, regardless of whether DRBD is mounted there or not. The simplest way of doing so is to take an existing .erlang.cookie from one of your nodes, copying it to the RabbitMQ data directory on the other node, and also copying it to the DRBD-backed filesystem." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:133(title) -msgid "Add RabbitMQ resources to Pacemaker" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:134(para) -msgid "You may now proceed with adding the Pacemaker configuration for RabbitMQ resources. Connect to the Pacemaker cluster with , and add the following cluster resources:" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:168(para) -msgid "p_ip_rabbitmq, a virtual IP address for use by RabbitMQ (192.168.42.100)," -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:172(para) -msgid "p_fs_rabbitmq, a Pacemaker managed filesystem mounted to /var/lib/rabbitmq on whatever node currently runs the RabbitMQ service," -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:177(para) -msgid "ms_drbd_rabbitmq, the master/slave set managing the rabbitmq DRBD resource," -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:181(para) -msgid "a service group and order and colocation constraints to ensure resources are started on the correct nodes, and in the correct sequence." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:186(para) -msgid " supports batch input, so you may copy and paste the above into your live pacemaker configuration, and then make changes as required. For example, you may enter edit p_ip_rabbitmq from the menu and edit the resource to match your preferred virtual IP address." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:192(para) -msgid "Once completed, commit your configuration changes by entering commit from the menu. Pacemaker will then start the RabbitMQ service, and its dependent resources, on one of your nodes." 
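The DRBD and filesystem steps described above for the rabbitmq resource map to roughly the following commands (DRBD 8.4 syntax; the 8.3 form of the forced promotion differs):

    drbdadm create-md rabbitmq           # initialize metadata, run on both nodes
    drbdadm up rabbitmq                  # attach and connect, run on both nodes
    drbdadm primary --force rabbitmq     # initial sync, run on one node only
    mkfs -t xfs /dev/drbd1               # or: mkfs -t xfs /dev/drbd/by-res/rabbitmq
    drbdadm secondary rabbitmq           # hand the device back once the FS exists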
-msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:198(title) -msgid "Configure OpenStack services for highly available RabbitMQ" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:199(para) -msgid "Your OpenStack services must now point their RabbitMQ configuration to the highly available, virtual cluster IP addressrather than a RabbitMQ server's physical IP address as you normally would." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:203(para) -msgid "For OpenStack Image, for example, if your RabbitMQ service IP address is 192.168.42.100 as in the configuration explained here, you would use the following line in your OpenStack Image API configuration file (glance-api.conf):" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_rabbitmq.xml:209(para) -msgid "No other changes are necessary to your OpenStack configuration. If the node currently hosting your RabbitMQ experiences a problem necessitating service failover, your OpenStack services may experience a brief RabbitMQ interruption, as they would in the event of a network hiccup, and then continue to run normally." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:11(title) -msgid "Highly available MySQL" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:12(para) -msgid "MySQL is the default database server used by many OpenStack services. Making the MySQL service highly available involves:" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:16(para) -msgid "Configuring a DRBD device for use by MySQL" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:19(para) -msgid "Configuring MySQL to use a data directory residing on that DRBD device" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:23(para) -msgid "Selecting and assigning a virtual IP address (VIP) that can freely float between cluster nodes" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:27(para) -msgid "Configuring MySQL to listen on that IP address" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:30(para) -msgid "Managing all resources, including the MySQL daemon itself, with the Pacemaker cluster manager" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:36(para) -msgid "MySQL/Galera is an alternative method of configuring MySQL for high availability. It is likely to become the preferred method of achieving MySQL high availability once it has sufficiently matured. At the time of writing, however, the Pacemaker/DRBD based approach remains the recommended one for OpenStack environments." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:45(para) -msgid "The Pacemaker based MySQL server requires a DRBD resource from which it mounts the /var/lib/mysql directory. In this example, the DRBD resource is simply named mysql:" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:49(title) -msgid "mysql DRBD resource configuration (/etc/drbd.d/mysql.res)" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:65(para) -msgid "This resource uses an underlying local disk (in DRBD terminology, a backing device) named /dev/data/mysql on both cluster nodes, node1 and node2. Normally, this would be an LVM Logical Volume specifically set aside for this purpose. 
The DRBD meta-disk is internal, meaning DRBD-specific metadata is being stored at the end of the disk device itself. The device is configured to communicate between IPv4 addresses 10.0.42.100 and 10.0.42.254, using TCP port 7700. Once enabled, it will map to a local DRBD block device with the device minor number 0, that is, /dev/drbd0." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:75(para) -msgid "Enabling a DRBD resource is explained in detail in the DRBD User's Guide. In brief, the proper sequence of commands is this:" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:84(para) -msgid "Initializes DRBD metadata and writes the initial set of metadata to /dev/data/mysql. Must be completed on both nodes." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:89(para) -msgid "Creates the /dev/drbd0 device node, attaches the DRBD device to its backing store, and connects the DRBD node to its peer. Must be completed on both nodes." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:106(title) -msgid "Creating a file system" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:107(para) -msgid "Once the DRBD resource is running and in the primary role (and potentially still in the process of running the initial device synchronization), you may proceed with creating the filesystem for MySQL data. XFS is generally the recommended filesystem due to its journaling, efficient allocation, and performance:" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:123(title) -msgid "Prepare MySQL for Pacemaker high availability" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:124(para) -msgid "In order for Pacemaker monitoring to function properly, you must ensure that MySQL's database files reside on the DRBD device. If you already have an existing MySQL database, the simplest approach is to just move the contents of the existing /var/lib/mysql directory into the newly created filesystem on the DRBD device." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:130(para) -msgid "You must complete the next step while the MySQL database server is shut down." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:136(para) -msgid "For a new MySQL installation with no existing data, you may also run the command:" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:141(para) -msgid "Regardless of the approach, the steps outlined here must be completed on only one cluster node." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:145(title) -msgid "Add MySQL resources to Pacemaker" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:146(para) -msgid "You can now add the Pacemaker configuration for MySQL resources. 
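A hedged sketch of the enable, filesystem, and data-directory steps described above for the mysql resource; the mysql_install_db call is an assumption for the unnamed command the text mentions for fresh installations, and /mnt is an arbitrary temporary mount point:

    drbdadm create-md mysql             # both nodes
    drbdadm up mysql                    # both nodes
    drbdadm primary --force mysql       # one node only
    mkfs -t xfs /dev/drbd0              # or the alternate path /dev/drbd/by-res/mysql
    mount /dev/drbd0 /mnt
    mv /var/lib/mysql/* /mnt            # with the MySQL server shut down
    # for a new installation with no existing data, presumably:
    #   mysql_install_db --datadir=/mnt
    umount /mnt
    drbdadm secondary mysql             # return to the secondary role; any sync continues in the background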
Connect to the Pacemaker cluster with , and add the following cluster resources:" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:185(para) -msgid "p_ip_mysql, a virtual IP address for use by MySQL (192.168.42.101)," -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:189(para) -msgid "p_fs_mysql, a Pacemaker managed filesystem mounted to /var/lib/mysql on whatever node currently runs the MySQL service," -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:194(para) -msgid "ms_drbd_mysql, the master/slave set managing the mysql DRBD resource," -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:198(para) -msgid "a service group and order and colocation constraints to ensure resources are started on the correct nodes, and in the correct sequence." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:203(para) -msgid " supports batch input, so you may copy and paste the above into your live pacemaker configuration, and then make changes as required. For example, you may enter edit p_ip_mysql from the menu and edit the resource to match your preferred virtual IP address." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:208(para) -msgid "Once completed, commit your configuration changes by entering commit from the menu. Pacemaker will then start the MySQL service, and its dependent resources, on one of your nodes." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:214(title) -msgid "Configure OpenStack services for highly available MySQL" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:215(para) -msgid "Your OpenStack services must now point their MySQL configuration to the highly available, virtual cluster IP addressrather than a MySQL server's physical IP address as you normally would." -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:218(para) -msgid "For OpenStack Image, for example, if your MySQL service IP address is 192.168.42.101 as in the configuration explained here, you would use the following line in your OpenStack Image registry configuration file (glance-registry.conf):" -msgstr "" - -#: ./doc/high-availability-guide/controller/section_mysql.xml:223(para) -msgid "No other changes are necessary to your OpenStack configuration. If the node currently hosting your database experiences a problem necessitating service failover, your OpenStack services may experience a brief MySQL interruption, as they would in the event of a network hiccup, and then continue to run normally." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_memcached.xml:7(title) -msgid "Memcached" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_memcached.xml:8(para) -msgid "Most OpenStack services can use Memcached to store ephemeral data, such as tokens. Although Memcached does not support typical forms of redundancy, such as clustering, OpenStack services can use almost any number of instances by configuring multiple hostnames or IP addresses. The Memcached client implements hashing to balance objects among the instances. Failure of an instance only impacts a percentage of the objects and the client automatically removes it from the list of instances." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_memcached.xml:16(para) -msgid "To install and configure it, read the official documentation." 
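A sketch of the corresponding crm configure batch input for the MySQL resources named above (p_ip_mysql at 192.168.42.101, p_fs_mysql on /var/lib/mysql, ms_drbd_mysql, group and constraints); the agent choices and timeouts are assumptions:

    primitive p_ip_mysql ocf:heartbeat:IPaddr2 \
      params ip="192.168.42.101" cidr_netmask="24" \
      op monitor interval="10s"
    primitive p_drbd_mysql ocf:linbit:drbd \
      params drbd_resource="mysql"
    primitive p_fs_mysql ocf:heartbeat:Filesystem \
      params device="/dev/drbd/by-res/mysql" directory="/var/lib/mysql" fstype="xfs" \
      op monitor interval="20s"
    primitive p_mysql ocf:heartbeat:mysql \
      op monitor interval="20s" timeout="30s"
    ms ms_drbd_mysql p_drbd_mysql \
      meta notify="true" master-max="1" clone-max="2"
    group g_mysql p_ip_mysql p_fs_mysql p_mysql
    colocation c_mysql_on_drbd inf: g_mysql ms_drbd_mysql:Master
    order o_drbd_before_mysql inf: ms_drbd_mysql:promote g_mysql:start

The glance-registry.conf line mentioned above would then point at the virtual IP, for example sql_connection = mysql://glance:GLANCE_DBPASS@192.168.42.101/glance, with the user and password as placeholders.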
-msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_memcached.xml:19(para) -msgid "Memory caching is managed by oslo-incubator, so the way to use multiple memcached servers is the same for all projects." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_memcached.xml:21(para) -msgid "Example configuration with two hosts:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_memcached.xml:23(para) -msgid "By default, controller1 handles the caching service but if the host goes down, controller2 does the job. For more information about Memcached installation, see the OpenStack Cloud Administrator Guide." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:7(title) -msgid "Run OpenStack API and schedulers" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:9(title) -msgid "API services" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:10(para) -msgid "All OpenStack projects have an API service for controlling all the resources in the Cloud. In active/active mode, the most common setup is to scale out these services on at least two nodes and to use load balancing and a virtual IP address (with HAProxy and Keepalived in this setup)." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:15(para) -msgid "To use highly available and scalable API services, we need to ensure that:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:19(para) -msgid "You use virtual IP addresses when configuring OpenStack Identity endpoints." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:24(para) -msgid "All OpenStack configuration files should refer to virtual IP addresses." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:29(para) -msgid "The monitor check is quite simple since it just establishes a TCP connection to the API port. Comparing to the active/passive mode using Corosync and resource agents, we do not check if the service is actually running. That is why all OpenStack API services should be monitored by another tool, for example Nagios." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:38(title) -msgid "Schedulers" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:39(para) -msgid "OpenStack schedulers are used to determine how to dispatch compute, network, and volume requests. The most common setup is to use RabbitMQ as a messaging system. 
Those services are connected to the messaging back end and can scale out:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:45(systemitem) -msgid "nova-scheduler" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:48(systemitem) -msgid "nova-conductor" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:51(systemitem) -msgid "cinder-scheduler" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:54(systemitem) -msgid "neutron-server" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:57(systemitem) -msgid "ceilometer-collector" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:60(systemitem) -msgid "heat-engine" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:63(para) -msgid "Please refer to the RabbitMQ section for configuring these services with multiple messaging servers." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:67(title) -msgid "Telemetry Central agent" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_controllers/section_run_openstack_api_and_schedulers.xml:68(para) -msgid "The Telemetry Central agent can be configured to partition its polling workload between multiple agents, enabling high availability. Please refer to this section of the OpenStack Cloud Administrator Guide for the requirements and implementation details of this configuration." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_metadata_agent.xml:7(title) -msgid "Run neutron metadata agent" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_metadata_agent.xml:8(para) -msgid "There is no native feature to make this service highly available. At this time, the Active / Passive solution exists to run the neutron metadata agent in failover mode with Pacemaker. See the active/passive section of this guide." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml:7(title) -msgid "Run neutron L3 agent" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml:8(para) -msgid "The neutron L3 agent is scalable, due to the scheduler that allows distribution of virtual routers across multiple nodes. The following options are available to make a router highly available:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml:14(para) -msgid "Automatic L3 agent failover for routers via the configuration option in /etc/neutron/neutron.conf." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml:19(para) -msgid "Use Layer 3 High Availability with VRRP. 
The following configuration options need to be set in /etc/neutron/neutron.conf to enable it:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml:28(th) -msgid "Option" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml:29(th) -msgid "Value to set" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml:30(th) -msgid "Description" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml:36(para) -msgid "l3_ha" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml:42(para) -msgid "All routers will be highly available by default." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml:47(para) -msgid "max_l3_agents_per_router" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml:50(para) ./doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml:63(para) -msgid "2" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml:53(para) -msgid "Maximum number of network nodes to be used for the HA router. The value can be larger than 2 but needs to be at least 2." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml:60(para) -msgid "min_l3_agents_per_router" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml:66(para) -msgid "Minimum number of network nodes to be used for the HA router. A new router creation will fail unless there are at least network nodes available. The value should not be less than 2." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_l3_agent.xml:76(para) -msgid "Using the active/passive solution to run the Neutron L3 agent in failover mode with Pacemaker. See the active/passive section of this guide." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_lbaas_agent.xml:7(title) -msgid "Run neutron LBaaS agent" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_lbaas_agent.xml:8(para) -msgid "Currently, there's no native feature to make the LBaaS agent highly available using the default plug-in HAProxy. A common way to make HAProxy highly available is to use the VRRP (Virtual Router Redundancy Protocol). Unfortunately, this is not yet implemented in the LBaaS HAProxy plug-in." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_dhcp_agent.xml:7(title) -msgid "Run neutron DHCP agent" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_network/section_run_neutron_dhcp_agent.xml:8(para) -msgid "The OpenStack Networking service has a scheduler that lets you run multiple agents across nodes. Also, the DHCP agent can be natively highly available. You can configure the number of DHCP agents per network using the parameter in neutron.conf. By default this is equal to 1. To achieve high availability assign more than one DHCP agent per network." -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_l3_agent.xml:7(title) -msgid "Highly available neutron L3 agent" -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_l3_agent.xml:9(para) -msgid "The neutron L3 agent provides L3/NAT forwarding to ensure external network access for VMs on tenant networks. 
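Written out as a neutron.conf fragment, the VRRP options tabled above would look like the following sketch; the dhcp_agents_per_network line is an assumption for the unnamed per-network DHCP agent parameter discussed in the DHCP agent section (its default is 1):

    [DEFAULT]
    l3_ha = True
    max_l3_agents_per_router = 2
    min_l3_agents_per_router = 2
    dhcp_agents_per_network = 2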
High availability for the L3 agent is achieved by adopting Pacemaker." -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_l3_agent.xml:13(para) -msgid "Here is the documentation for installing neutron L3 agent." -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_l3_agent.xml:18(title) -msgid "Add neutron L3 agent resource to Pacemaker" -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_l3_agent.xml:24(para) -msgid "You may now proceed with adding the Pacemaker configuration for neutron L3 agent resource. Connect to the Pacemaker cluster with crm configure, and add the following cluster resources:" -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_l3_agent.xml:34(para) -msgid "p_neutron-l3-agent, a resource for manage Neutron L3 Agent service" -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_l3_agent.xml:41(para) -msgid "Once completed, commit your configuration changes by entering commit from the crm configure menu. Pacemaker will then start the neutron L3 agent service, and its dependent resources, on one of your nodes." -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_l3_agent.xml:45(para) -msgid "This method does not ensure a zero downtime since it has to recreate all the namespaces and virtual routers on the node." -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_metadata_agent.xml:6(title) -msgid "Highly available neutron metadata agent" -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_metadata_agent.xml:7(para) -msgid "Neutron metadata agent allows Compute API metadata to be reachable by VMs on tenant networks. High availability for the metadata agent is achieved by adopting Pacemaker." -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_metadata_agent.xml:11(para) -msgid "Here is the documentation for installing Neutron Metadata Agent." -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_metadata_agent.xml:14(title) -msgid "Add neutron metadata agent resource to Pacemaker" -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_metadata_agent.xml:19(para) -msgid "You may now proceed with adding the Pacemaker configuration for neutron metadata agent resource. Connect to the Pacemaker cluster with crm configure, and add the following cluster resources:" -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_metadata_agent.xml:29(para) -msgid "p_neutron-metadata-agent, a resource for manage Neutron Metadata Agent service" -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_metadata_agent.xml:34(para) -msgid "crm configure supports batch input, so you may copy and paste the above into your live Pacemaker configuration, and then make changes as required." -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_metadata_agent.xml:37(para) -msgid "Once completed, commit your configuration changes by entering commit from the crm configure menu. Pacemaker will then start the neutron metadata agent service, and its dependent resources, on one of your nodes." 
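The p_neutron-l3-agent and p_neutron-metadata-agent resources described above would typically be defined with OCF agents from the openstack-resource-agents project; the agent names and operation settings below are assumptions to verify against the agents actually installed:

    primitive p_neutron-l3-agent ocf:openstack:neutron-agent-l3 \
      op monitor interval="30s" timeout="30s"
    primitive p_neutron-metadata-agent ocf:openstack:neutron-metadata-agent \
      op monitor interval="30s" timeout="30s"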
-msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_dhcp_agent.xml:7(title) -msgid "Highly available neutron DHCP agent" -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_dhcp_agent.xml:9(para) -msgid "The neutron DHCP agent distributes IP addresses to the VMs with dnsmasq (by default). High availability for the DHCP agent is achieved by adopting Pacemaker." -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_dhcp_agent.xml:13(para) -msgid "Here is the documentation for installing neutron DHCP agent." -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_dhcp_agent.xml:18(title) -msgid "Add neutron DHCP agent resource to Pacemaker" -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_dhcp_agent.xml:24(para) -msgid "You may now proceed with adding the Pacemaker configuration for neutron DHCP agent resource. Connect to the Pacemaker cluster with crm configure, and add the following cluster resources:" -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_dhcp_agent.xml:31(para) -msgid "This configuration creates:" -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_dhcp_agent.xml:34(para) -msgid "p_neutron-agent-dhcp, a resource for managing the neutron DHCP Agent service." -msgstr "" - -#: ./doc/high-availability-guide/network/section_highly_available_neutron_dhcp_agent.xml:41(para) -msgid "Once completed, commit your configuration changes by entering commit from the crm configure menu. Pacemaker will then start the neutron DHCP agent service, and its dependent resources, on one of your nodes." -msgstr "" - -#: ./doc/high-availability-guide/network/section_manage_network_resources.xml:7(title) -msgid "Manage network resources" -msgstr "" - -#: ./doc/high-availability-guide/network/section_manage_network_resources.xml:9(para) -msgid "You can now add the Pacemaker configuration for managing all network resources together with a group. Connect to the Pacemaker cluster with crm configure, and add the following cluster resources:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:7(title) -msgid "MariaDB with Galera (Red Hat-based platforms)" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:8(para) -msgid "MariaDB with Galera provides synchronous database replication in an active-active, multi-master environment. High availability for the data itself is managed internally by Galera, while access availability will be managed by HAProxy." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:12(para) -msgid "This guide assumes that three nodes are used to form the MariaDB Galera cluster. Unless otherwise specified, all commands need to be executed on all cluster nodes." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:17(title) -msgid "To install MariaDB with Galera" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:19(para) -msgid "Red Hat-based distributions include Galera packages in their repositories. To install the most current version of the packages, run the following command:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:26(para) -msgid "(Optional) Configure the clustercheck utility." 
-msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:27(para) -msgid "If HAProxy is used to load-balance client access to MariaDB, as described in the HAProxy section of this document, you can use the clustercheck utility to improve health checks." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:34(para) -msgid "Create file etc/sysconfig/clustercheck with the following contents:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:38(replaceable) ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:70(replaceable) -msgid "PASSWORD" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:42(para) -msgid "Make sure a sensible password is used." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:46(para) -msgid "Configure monitor service (used by HAProxy):" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:47(para) -msgid "Create file /etc/xinetd.d/galera-monitor with the following contents:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:68(para) -msgid "Create the database user required by clustercheck:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:74(para) -msgid "Start xinetd (required by clustercheck):" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:82(para) -msgid "Configure MariaDB with Galera." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:85(para) -msgid "Create the Galera configuration file /etc/my.cnf.d/galera.cnf with the following contents:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:98(replaceable) -msgid "NODE_IP" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:101(replaceable) ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:67(replaceable) ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:69(replaceable) -msgid "PRIMARY_NODE_IP" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:101(replaceable) ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:67(replaceable) -msgid "SECONDARY_NODE_IP" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:101(replaceable) -msgid "TERTIARY_NODE_IP" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:116(para) -msgid "Open firewall ports used for MariaDB and Galera communications:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:131(para) -msgid "Start MariaDB cluster:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:134(para) -msgid "On node 1:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:138(para) -msgid "On nodes 2 and 3:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:142(para) -msgid "Once the output from clustercheck is on all nodes, restart MariaDB on node 1:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mariadb_galera_rh.xml:144(replaceable) -msgid "mysql PIDs" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:7(title) -msgid "MySQL with Galera" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:8(para) 
-msgid "Rather than starting with a vanilla version of MySQL, and then adding Galera, you will want to install a version of MySQL patched for wsrep (Write Set REPlication) from https://launchpad.net/codership-mysql. The wsrep API is suitable for configuring MySQL High Availability in OpenStack because it supports synchronous replication." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:13(para) -msgid "Note that the installation requirements call for careful attention. Read the guide https://launchpadlibrarian.net/66669857/README-wsrep to ensure you follow all the required steps." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:16(para) -msgid "And for any additional information about Galera, you can access this guide: http://galeracluster.com/documentation-webpages/gettingstarted.html" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:19(title) -msgid "Installing Galera through a MySQL version patched for wsrep:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:21(para) -msgid "Setup the repository for Ubuntu 14.04 \"trusty\" (most recent). Install the software properties, the key, and the repository:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:30(para) -msgid "You can change the mirror to one near you on: downloads.mariadb.org" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:37(para) -msgid "Update your system and install the required packages: " -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:43(para) -msgid "If you have mariaDB already installed you need to re-apply all the permissions from the installation guide. It will purge all privileges!" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:49(para) -msgid "Adjust the configuration:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:50(para) -msgid "In the /etc/mysql/my.conf file, make the following changes:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:58(para) -msgid "Create the /etc/mysql/conf.d/wsrep.cnf file." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:62(para) -msgid "Paste the following lines in this file:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:70(replaceable) -msgid "NODE_NAME" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:72(para) -msgid "Replace PRIMARY_NODE_IP and SECONDARY_NODE_IP with the IP addresses of your primary and secondary servers." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:77(para) -msgid "Replace PRIMARY_NODE_IP with the hostname of the server. This is set for logging." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:80(para) -msgid "Copy this file to all other databases servers and change the value of wsrep_cluster_address and wsrep_node_name accordingly." 
-msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:83(para) -msgid "Start mysql as root and execute the following queries:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:85(para) -msgid "Remove user accounts with empty user names because they cause problems:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:89(para) -msgid "Check that the nodes can access each other through the firewall. Depending on your environment, this might mean adjusting iptables, as in:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:98(para) -msgid "This might also mean configuring any NAT firewall between nodes to allow direct connections. You might need to disable SELinux, or configure it to allow mysqld to listen to sockets at unprivileged ports." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:105(para) -msgid "For the next step create a back-up file of the debian.cnf file in /etc/mysql on all database servers. Should something go wrong just copy the back-up file back." -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:110(para) -msgid "Make sure you have SSH root access on the other servers. From the primary database server, copy the debian.cnf file to all other servers by running the following command:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:114(para) -msgid "After the copy make sure that all files are the same, you can do this by using the following command:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:116(para) -msgid "From the debian.cnf get the database password:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:118(para) -msgid "The result will look like this:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:131(para) -msgid "The below query should be run on every server except the primary node. This will make sure that you can restart the database again. Do not forget to add the password from the debian.cnf. To do this, run:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:137(para) -msgid "Stop all the mysql servers and start the first server with the following command:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:139(para) -msgid "All other nodes can now be started using:" -msgstr "" - -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:141(para) -msgid "Verify the wsrep replication by logging in as root under mysql and running the following command:" -msgstr "" - -#. Put one translator per line, in the form of NAME , YEAR1, YEAR2 -#: ./doc/high-availability-guide/ha_aa_db/section_mysql_galera.xml:0(None) -msgid "translator-credits" -msgstr "" - diff --git a/doc/high-availability-guide/locale/ja.po b/doc/high-availability-guide/locale/ja.po deleted file mode 100644 index e3c06e38..00000000 --- a/doc/high-availability-guide/locale/ja.po +++ /dev/null @@ -1,1602 +0,0 @@ -# Translators: -# Daisuke Matsui , 2015 -# Sasuke(Kyohei MORIYAMA) <>, 2014 -# Sasuke(Kyohei MORIYAMA) <>, 2015 -# Tomoyuki KATO , 2013-2015 -# -# OpenStack Infra , 2015. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: PACKAGE VERSION\n" -"POT-Creation-Date: 2015-09-15 07:40+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-08-28 01:39+0000\n" -"Last-Translator: Daisuke Matsui \n" -"Language-Team: Japanese (http://www.transifex.com/openstack/openstack-" -"manuals-i18n/language/ja/)\n" -"Language: ja\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"X-Generator: Zanata 3.7.1\n" - -msgid "2" -msgstr "2" - -msgid "2012" -msgstr "2012" - -msgid "2012-01-16" -msgstr "2012-01-16" - -msgid "2012-05-24" -msgstr "2012-05-24" - -msgid "2013" -msgstr "2013" - -msgid "2014" -msgstr "2014" - -msgid "2014-04-17" -msgstr "2014-04-17" - -msgid "2014-05-16" -msgstr "2014-05-16" - -msgid "2014-10-17" -msgstr "2014-10-17" - -msgid "2015-04-30" -msgstr "2015-04-30" - -msgid "Memcached service" -msgstr "Memcached サービス" - -msgid "" -"crm configure supports batch input, so you may copy and " -"paste the above into your live Pacemaker configuration, and then make " -"changes as required." -msgstr "" -"crm configure は、パッチ入力をサポートするため、Pacemaker " -"の設定に上の内容をコピーして貼り付け、必要に応じて変更できます。" - -msgid "" -"crm configure supports batch input, so you may copy and " -"paste the above into your live Pacemaker configuration, and then make " -"changes as required. For example, you may enter edit p_ip_glance-" -"api from the crm configure menu and edit the " -"resource to match your preferred virtual IP address." -msgstr "" -"crm configure は、パッチ入力をサポートするため、Pacemaker " -"の設定に上の内容をコピーして貼り付け、必要に応じて変更できます。例えば、" -"crm configure メニューから edit p_ip_glance-api を入力し、リソースを仮想 IP アドレスと一致させるために編集できます。" - -msgid "" -"crm configure supports batch input, so you may copy and " -"paste the above into your live pacemaker configuration, and then make " -"changes as required." -msgstr "" -"crm configure はバッチ入力をサポートします。そのため、現在" -"の pacemaker 設定の中に上をコピー・ペーストし、適宜変更を反映できます。" - -msgid "" -"crm configure supports batch input, so you may copy and " -"paste the above into your live pacemaker configuration, and then make " -"changes as required. For example, you may enter edit p_ip_cinder-" -"api from the crm configure menu and edit the " -"resource to match your preferred virtual IP address." -msgstr "" -"crm configure はバッチ入力をサポートします。そのため、現在" -"の pacemaker 設定の中に上をコピー・ペーストし、適宜変更を反映できます。例え" -"ば、お好みの仮想 IP アドレスに一致させるために、crm configure メニューから edit p_ip_cinder-api と入力し、リ" -"ソースを編集できます。" - -msgid "" -"crm configure supports batch input, so you may copy and " -"paste the above into your live pacemaker configuration, and then make " -"changes as required. For example, you may enter edit p_ip_keystone from the crm configure menu and edit the " -"resource to match your preferred virtual IP address." -msgstr "" -"crm configure はバッチ入力をサポートします。そのため、現在" -"の pacemaker 設定の中に上をコピー・ペーストし、適宜変更を反映できます。例え" -"ば、お好みの仮想 IP アドレスに一致させるために、crm configure メニューから edit p_ip_keystone と入力し、リソー" -"スを編集できます。" - -msgid "" -"crm configure supports batch input, so you may copy and " -"paste the above into your live pacemaker configuration, and then make " -"changes as required. For example, you may enter edit p_neutron-" -"server from the crm configure menu and edit the " -"resource to match your preferred virtual IP address." 
-msgstr "" -"crm configure はバッチ入力をサポートします。そのため、現在" -"の pacemaker 設定の中に上をコピー・ペーストし、適宜変更を反映できます。例え" -"ば、お好みの仮想 IP アドレスに一致させるために、crm configure メニューから edit p_neutron-server と入力し、リ" -"ソースを編集できます。" - -msgid "" -"mysql DRBD resource configuration (/etc/drbd.d/" -"mysql.res)" -msgstr "" -"mysql DRBD リソース設定 (/etc/drbd.d/mysql.res)" - -msgid "" -"p_cinder-api, a resource for manage Block Storage API " -"service" -msgstr "" -"p_cinder-api, Block Storage API サービスを管理するためのリ" -"ソース" - -msgid "" -"p_glance-api, a resource for managing OpenStack Image API " -"service" -msgstr "" -"p_glance-api、OpenStack Image API サービスを管理するための" -"リソース" - -msgid "" -"p_neutron-agent-dhcp, a resource for managing the neutron " -"DHCP Agent service." -msgstr "" -"p_neutron-agent-dhcp, neutron DHCP agent サービスを管理す" -"るためのリソース。" - -msgid "" -"p_neutron-l3-agent, a resource for manage Neutron L3 " -"Agent service" -msgstr "" -"p_neutron-l3-agent, Neutron L3 Agent サービスを管理するた" -"めのリソース。" - -msgid "" -"p_neutron-metadata-agent, a resource for manage Neutron " -"Metadata Agent service" -msgstr "" -"p_neutron-metadata-agent, Neutron Metadata Agent サービス" -"を管理するためのリソース。" - -msgid "" -"rabbitmq DRBD resource configuration (/etc/drbd." -"d/rabbitmq.res)" -msgstr "" -"rabbitmq DRBD リソース設定 (/etc/drbd.d/" -"rabbitmq.res)" - -msgid "" -"fence-agents (Fedora only; all other distributions use " -"fencing agents from cluster-glue)" -msgstr "" -"fence-agents (Fedora のみ。他のすべてのディストリビュー" -"ションは、cluster-glue からフェンスエージェントを使用しま" -"す。)" - -msgid "" -"pacemaker (Note that the crm shell should be downloaded " -"separately.)" -msgstr "" -"pacemaker (crm シェルを別途ダウンロードする必要があるかも" -"しれないことに注意してください。)" - -msgid " (LSB)" -msgstr " (LSB)" - -msgid " (LSB, alternate)" -msgstr " (LSB, alternate)" - -msgid " (systemd)" -msgstr " (systemd)" - -msgid " (upstart)" -msgstr " (upstart)" - -msgid "" -"A crucial aspect of high availability is the elimination of single points of " -"failure (SPOFs). A SPOF is an individual piece of equipment or software " -"which will cause system downtime or data loss if it fails. In order to " -"eliminate SPOFs, check that mechanisms exist for redundancy of:" -msgstr "" -"高可用性の重要な側面は、単一障害点 (SPOF) を減らすことです。SPOF は、障害が発" -"生した場合にシステム停止やデータ損失を引き起こす、設備やソフトウェアの個々の" -"部品です。SPOF を削減するために、以下の冗長性に対するメカニズムを確認します。" - -msgid "" -"A typical active/active installation for a stateful service would include " -"redundant services with all instances having an identical state. For " -"example, updates to one instance of a database would also update all other " -"instances. This way a request to one instance is the same as a request to " -"any other. A load balancer manages the traffic to these systems, ensuring " -"that operational systems always handle the request." -msgstr "" -"一般的にステートレスサービスをアクティブ / アクティブにインストールすること" -"は、すべてのインスタンスが同じ状態を持つ冗長なサービスになることを含みます。" -"例えば、あるインスタンスのデータベースの更新は、他のすべてのインスタンスも更" -"新されます。このように、あるインスタンスへのリクエストは、他へのリクエストと" -"同じです。ロードバランサーがこれらのシステムのトラフィックを管理し、利用可能" -"なシステムが常にリクエストを確実に処理します。" - -msgid "" -"A typical active/passive installation for a stateful service maintains a " -"replacement resource that can be brought online when required. A separate " -"application (such as Pacemaker or Corosync) monitors these services, " -"bringing the backup online as necessary." 
-msgstr "" -"一般的にステートレスサービスをアクティブ / パッシブにインストールすると、必要" -"に応じてオンラインにできる置換リソースを維持します。 独立したアプリケーショ" -"ン (Pacemaker や Corosync など) がこれらのサービスを監視し、必要に応じてバッ" -"クアップ側をオンラインにします。" - -msgid "API node cluster stack" -msgstr "API ノードクラスタースタック" - -msgid "API services" -msgstr "API サービス" - -msgid "Accidental deletion or destruction of data." -msgstr "意図しないデータの削除や破損。" - -msgid "Active/Active" -msgstr "アクティブ/アクティブ" - -msgid "Active/Passive" -msgstr "アクティブ/パッシブ" - -msgid "Add Block Storage API resource to Pacemaker" -msgstr "Block Storage API リソースの Pacemaker への追加" - -msgid "Add MySQL resources to Pacemaker" -msgstr "MySQL リソースの Pacemaker への追加" - -msgid "Add OpenStack Identity resource to Pacemaker" -msgstr "OpenStack Identity リソースの Pacemaker への追加" - -msgid "Add OpenStack Image API resource to Pacemaker" -msgstr "OpenStack Image API リソースの Pacemaker への追加" - -msgid "Add OpenStack Networking Server resource to Pacemaker" -msgstr "OpenStack Networking Server リソースの Pacemaker への追加" - -msgid "Add RabbitMQ resources to Pacemaker" -msgstr "RabbitMQ リソースの Pacemaker への追加" - -msgid "Add neutron DHCP agent resource to Pacemaker" -msgstr "Neutron DHCP Agent リソースの Pacemaker への追加" - -msgid "Add neutron L3 agent resource to Pacemaker" -msgstr "Neutron L3 Agent リソースの Pacemaker への追加" - -msgid "Add neutron metadata agent resource to Pacemaker" -msgstr "Neutron Metadata Agent リソースの Pacemaker への追加" - -msgid "Add the Telemetry central agent resource to Pacemaker" -msgstr "Telemetry 中央エージェントリソースの Pacemaker への追加" - -msgid "Adjust the configuration:" -msgstr "設定を調整します。" - -msgid "After each change of this file, you should restart HAProxy." -msgstr "このファイルの変更後、HAProxy を再起動すべきです。" - -msgid "" -"After the copy make sure that all files are the same, you can do this by " -"using the following command:" -msgstr "" -"コピーした後、下に示したコマンドを実行することで、すべてのファイルが同じとい" -"うことを確認してください:" - -msgid "All OpenStack API services" -msgstr "すべての OpenStack API サービス" - -msgid "All OpenStack configuration files should refer to virtual IP addresses." -msgstr "すべてのOpenStack設定ファイルは、仮想IPアドレスを参照すべきです。" - -msgid "All OpenStack schedulers" -msgstr "すべての OpenStack スケジューラー" - -msgid "Applications and automatic service migration" -msgstr "アプリケーションおよびサービスの自動的なマイグレーション" - -msgid "Begin trunk designation." -msgstr "trunk 指定を開始しました。" - -msgid "" -"Check that the nodes can access each other through the firewall. 
Depending " -"on your environment, this might mean adjusting iptables, as in:" -msgstr "" -"最後に、ノードがファイアウォール越しにお互いにアクセスできることを確認しま" -"す。お使いの環境によっては、次のような iptables の調整を意味するかもしれませ" -"ん。" - -msgid "Cloud controller cluster stack" -msgstr "クラウドコントローラーのクラスタースタック" - -msgid "Configure Block Storage API service" -msgstr "Block Storage API サービスの設定" - -msgid "Configure DRBD" -msgstr "DRBD の設定" - -msgid "Configure OpenStack Identity service" -msgstr "OpenStack Identity Service の設定" - -msgid "Configure OpenStack Identity to listen on the VIP address," -msgstr "" -"OpenStack Identity がその仮想 IP アドレスでリッスンするよう設定します。" - -msgid "Configure OpenStack Image service API" -msgstr "OpenStack Image サービス API の設定" - -msgid "Configure OpenStack Networking server" -msgstr "OpenStack Networking Server の設定" - -msgid "Configure OpenStack Networking to listen on the virtual IP address," -msgstr "OpenStack Networking が仮想 IP アドレスをリッスンするよう設定します。" - -msgid "Configure OpenStack services for highly available MySQL" -msgstr "高可用性 MySQL のための OpenStack サービスの設定" - -msgid "Configure OpenStack services for highly available RabbitMQ" -msgstr "高可用性 RabbitMQ のための OpenStack サービスの設定" - -msgid "Configure OpenStack services to use Rabbit HA queues" -msgstr "RabbitMQ HA キューを使用するための OpenStack サービスの設定" - -msgid "Configure OpenStack services to use RabbitMQ" -msgstr "RabbitMQ を使用するための OpenStack サービスの設定" - -msgid "Configure OpenStack services to use high available OpenStack Image API" -msgstr "" -"高可用性 OpenStack Image Service API を使用するための OpenStack サービスの設" -"定" - -msgid "Configure OpenStack services to use highly available Block Storage API" -msgstr "高可用性 Block Storage API を使用するための OpenStack サービスの設定" - -msgid "" -"Configure OpenStack services to use highly available OpenStack Networking " -"server" -msgstr "" -"高可用性 OpenStack Networking を使用するための OpenStack サービスの設定" - -msgid "" -"Configure OpenStack services to use the highly available OpenStack Identity" -msgstr "高可用性 OpenStack Identity を使用するための OpenStack サービスの設定" - -msgid "Configure OpenStack services to use the virtual IP address." -msgstr "OpenStack のサービスが仮想 IP アドレスを使用するよう設定します。" - -msgid "Configure OpenStack services to use this IP address." -msgstr "OpenStack のサービスがこの IP アドレスを使用するよう設定します。" - -msgid "Configure Pacemaker group" -msgstr "Pacemaker グループの設定" - -msgid "Configure RabbitMQ" -msgstr "RabbitMQ の設定" - -msgid "Configure RabbitMQ for HA queues" -msgstr "高可用性 キュー用の RabbitMQ の設定" - -msgid "Configure Telemetry central agent service" -msgstr "Telemetry 中央エージェントサービスの設定" - -msgid "Configure the VIP" -msgstr "仮想 IP の設定" - -msgid "Configuring Block Storage to listen on the VIP address" -msgstr "Block Storage がその仮想 IP アドレスをリッスンする設定" - -msgid "Configuring OpenStack services to use this IP address" -msgstr "OpenStack のサービスがこの IP アドレスを使用する設定" - -msgid "Copyright details are filled in by the template." -msgstr "Copyright details are filled in by the template." 
- -msgid "Corosync configuration file (corosync.conf)" -msgstr "Corosync 設定ファイル (corosync.conf)" - -msgid "Create a file system" -msgstr "ファイルシステムの作成" - -msgid "Creating a file system" -msgstr "ファイルシステムの作成" - -msgid "Data loss" -msgstr "データ損失" - -msgid "Database" -msgstr "データベース" - -msgid "Description" -msgstr "説明" - -msgid "Do this configuration on all services using RabbitMQ:" -msgstr "RabbitMQ を使用するすべてのサービスでこの設定を行います。" - -msgid "Edit /etc/ceilometer/ceilometer.conf:" -msgstr "/etc/ceilometer/ceilometer.conf を編集します。" - -msgid "Edit /etc/cinder/cinder.conf:" -msgstr "/etc/cinder/cinder.conf を編集します。" - -msgid "Edit /etc/glance/glance-api.conf:" -msgstr "/etc/glance/glance-api.conf を編集します。" - -msgid "Edit /etc/neutron/neutron.conf:" -msgstr "/etc/neutron/neutron.conf を編集します。" - -msgid "Example configuration with two hosts:" -msgstr "2ホストの設定例" - -msgid "Facility services such as power, air conditioning, and fire protection" -msgstr "電源、空調、防火などに関する設備" - -msgid "" -"Finally, we need to create a service group to ensure that " -"the virtual IP is linked to the API services resources:" -msgstr "" -"最後に、仮想 IP が API サービスリソースに関連付けられていることを確認するため" -"に、サービス group を作成する必要があります。" - -msgid "First of all, you need to download the resource agent to your system:" -msgstr "" -"まず初めに、リソースエージェントをシステムにダウンロードする必要があります。" - -msgid "" -"First, you must select and assign a virtual IP address (VIP) that can freely " -"float between cluster nodes." -msgstr "" -"まず初めに、クラスターノード間で自由に移動できる仮想 IP アドレス (VIP) を選択" -"して割り当てる必要があります。" - -msgid "" -"For UDPU, every node that should be a member of the membership must be " -"specified." -msgstr "" -"UDPUでは、全てのノードがメンバーシップメンバーを指定しなければなりません。" - -msgid "" -"For example, you should configure OpenStack Compute for using highly " -"available OpenStack Networking server in editing nova.conf file:" -msgstr "" -"例えば、高可用性 Networking サーバーを使用するために、nova.conf ファイルを編集して OpenStack Compute を設定する必要があります :" - -msgid "HA using active/active" -msgstr "アクティブ/アクティブを使用した HA" - -msgid "HA using active/passive" -msgstr "アクティブ/パッシブを使用した HA" - -msgid "" -"HAProxy is a very fast and reliable solution offering high availability, " -"load balancing, and proxying for TCP and HTTP-based applications. It is " -"particularly suited for web sites crawling under very high loads while " -"needing persistence or Layer 7 processing. Supporting tens of thousands of " -"connections is clearly realistic with today’s hardware." -msgstr "" -"HAProxy は高可用性、負荷分散、TCP と HTTP ベースのアプリケーションに対するプ" -"ロキシを提供する非常に高速かつ信頼性のあるソリューションです。とくに永続性と" -"レイヤー 7 処理を必要とする、非常に負荷の高いウェブサイトに適しています。数千" -"の接続をサポートすることは、今日のハードウェアではかなり現実的です。" - -msgid "HAProxy nodes" -msgstr "HAProxy ノード" - -msgid "Here is an example for the HAProxy configuration file:" -msgstr "これはHAProxyの設定ファイル例です" - -msgid "" -"Here is the documentation " -"for installing Neutron Metadata Agent." -msgstr "" -"ここに Neutron Metadata エージェントをインストールするためのドキュメントがあります。" - -msgid "High Availability systems seek to minimize two things:" -msgstr "高可用性システムは、以下の 2 つの最小化を目指しています。" - -msgid "" -"High-availability systems typically achieve an uptime percentage of 99.99% " -"or more, which roughly equates to less than an hour of cumulative downtime " -"per year. In order to achieve this, high availability systems should keep " -"recovery times after a failure to about one to two minutes, sometimes " -"significantly less." 
-msgstr "" -"高可用性システムは、一般的に 99.99% 以上の稼働率を達成します。おそよ年間 1 時" -"間未満の停止時間になります。高可用性システムは、これを実現するために、障害発" -"生後の復旧時間を 1 ~ 2 分以内に、ときにはさらに短く抑えるべきです。" - -msgid "Highly available Block Storage API" -msgstr "高可用性 Block Storage API" - -msgid "Highly available MySQL" -msgstr "高可用性 MySQL" - -msgid "Highly available OpenStack Identity" -msgstr "高可用性 OpenStack Identity" - -msgid "Highly available OpenStack Image API" -msgstr "高可用性 OpenStack Image API" - -msgid "Highly available OpenStack Networking server" -msgstr "高可用性 OpenStack Networking サーバー" - -msgid "Highly available RabbitMQ" -msgstr "高可用性 RabbitMQ" - -msgid "Highly available Telemetry central agent" -msgstr "高可用性 Telemetry 中央エージェント" - -msgid "Highly available neutron DHCP agent" -msgstr "高可用性 Neutron DHCP Agent" - -msgid "Highly available neutron L3 agent" -msgstr "高可用性 Neutron L3 エージェント" - -msgid "Highly available neutron metadata agent" -msgstr "高可用性 Neutron Metadata Agent" - -msgid "How frequently to retry connecting with RabbitMQ:" -msgstr "RabbitMQ と再接続する頻度:" - -msgid "How long to back-off for between retries when connecting to RabbitMQ:" -msgstr "RabbitMQ に接続するとき再試行するまでにバックオフする間隔:" - -msgid "" -"If the is set to yes, the broadcast address is used for " -"communication. If this option is set, should not be set." -msgstr "" -"にyesが設定された場合、伝達にはブロードキャストアドレスが使わ" -"れます。このオプションを設定する場合、 を設定してはいけませ" -"ん。" - -msgid "" -"If the cluster is working, you can now proceed to creating users and " -"passwords for queues." -msgstr "" -"クラスターが動作していれば、キュー用のユーザーとパスワードを作成する手順に進" -"めます。" - -msgid "" -"If you are using both private and public IP addresses, you should create two " -"Virtual IP addresses and define your endpoint like this:" -msgstr "" -"プライベート IP とパブリック IP の両方を使用する場合、2 つの仮想 IP アドレス" -"を作成し、次のようにエンドポイントを定義すべきです。" - -msgid "" -"If you are using both private and public IP, you should create two Virtual " -"IPs and define your endpoint like this:" -msgstr "" -"プライベート IP とパブリック IP の両方を使用する場合、2 つの仮想 IP アドレス" -"を作成し、次のようにエンドポイントを定義すべきです。" - -msgid "" -"If you are using the horizon dashboard, you should edit the " -"local_settings.py file:" -msgstr "" -"Dashboard を使用している場合、local_settings.py ファイルを" -"編集する必要があります。" - -msgid "" -"If you change the configuration from an old setup which did not use HA " -"queues, you should interrupt the service:" -msgstr "" -"HA キューを使用していない古いセットアップから設定を変更した場合、サービスを中" -"断しなければいけません。" - -msgid "" -"In an active/active configuration, systems also use a backup but will manage " -"both the main and redundant systems concurrently. This way, if there is a " -"failure the user is unlikely to notice. The backup system is already online, " -"and takes on increased load while the main system is fixed and brought back " -"online." -msgstr "" -"アクティブ / アクティブの設定の場合、システムはバックアップ側も使用しますが、" -"メインと冗長システムを同時に管理します。このように、ユーザーが気が付かない障" -"害が発生した場合、バックアップシステムはすでにオンラインであり、メインシステ" -"ムが復旧され、オンラインになるまでの間は負荷が高くなります。" - -msgid "" -"In an active/passive configuration, systems are set up to bring additional " -"resources online to replace those that have failed. For example, OpenStack " -"would write to the main database while maintaining a disaster recovery " -"database that can be brought online in the event that the main database " -"fails." -msgstr "" -"アクティブ / パッシブの設定の場合、システムは故障したリソースを置き換えるため" -"に、オンラインで追加リソースをセットアップします。例えば、メインのデータベー" -"スが故障したときにオンラインになる災害対策データベースを維持する限り、" -"OpenStack はメインのデータベースに書き込みます。" - -msgid "" -"Initializes DRBD metadata and writes the initial set of metadata to " -"/dev/data/mysql. Must be completed on both nodes." 
-msgstr "" -"DRBD メタデータを初期化し、メタデータの初期セットを /dev/data/" -"mysql に書き込みます。両方のノードで完了する必要があります。" - -msgid "Install RabbitMQ" -msgstr "RabbitMQ のインストール" - -msgid "Install packages" -msgstr "パッケージのインストール" - -msgid "Installing Galera through a MySQL version patched for wsrep:" -msgstr "" -"wsrep 用パッチ適用済み MySQL バージョンの Galera をインストールします。" - -msgid "Introduction to OpenStack High Availability" -msgstr "OpenStack 高可用性の概要" - -msgid "" -"Making stateful services highly available can depend on whether you choose " -"an active/passive or active/active configuration." -msgstr "" -"ステートフルサービスの高可用性は、アクティブ / パッシブとアクティブ / アク" -"ティブのどちらの設定を選択するかに依存する可能性があります。" - -msgid "" -"Making the Telemetry central agent service highly available in active / " -"passive mode involves managing its daemon with the Pacemaker cluster manager." -msgstr "" -"Telemetry 中央エージェントサービスをアクティブ / パッシブモードで高可用性にす" -"ることは、Pacemaker クラスターマネージャーでそのデーモンを管理することが関連" -"します。" - -msgid "Manage network resources" -msgstr "ネットワークリソースの管理" - -msgid "" -"Manage the OpenStack Networking API Server daemon with the Pacemaker cluster " -"manager," -msgstr "" -"Pacemaker クラスターマネージャーを用いた OpenStack Networking API Server デー" -"モンの管理" - -msgid "" -"Maximum retries with trying to connect to RabbitMQ (infinite by default):" -msgstr "RabbitMQ に接続を試行する最大回数 (デフォルトで無制限):" - -msgid "Memcached" -msgstr "Memcached" - -msgid "" -"Memory caching is managed by oslo-incubator, so the way to use multiple " -"memcached servers is the same for all projects." -msgstr "" -"メモリーキャッシュは oslo-incubator により管理されています。そのため、複数の " -"memcached サーバーを使用する方法が、すべてのプロジェクトで同じになります。" - -msgid "" -"Minor cleanup of typos, otherwise no major revisions for Icehouse release." -msgstr "" -"誤字・脱字などの軽微な修正。Icehouse リリース向けの大きな改版はありません。" - -msgid "" -"Most high availability systems guarantee protection against system downtime " -"and data loss only in the event of a single failure. However, they are also " -"expected to protect against cascading failures, where a single failure " -"deteriorates into a series of consequential failures." -msgstr "" -"多くの高可用性システムは、単一障害事象のみにおいて、システム停止やデータ損失" -"に対する保護を保証します。しかしながら、単一障害が一連の障害を悪化させてい" -"く、段階的な障害に対しても保護されることが期待されます。" - -msgid "" -"Most high availability systems will fail in the event of multiple " -"independent (non-consequential) failures. In this case, most systems will " -"protect data over maintaining availability." -msgstr "" -"多くの高可用性システムは、複数の独立した (不連続な) 障害が発生すると停止しま" -"す。この場合、多くのシステムは可用性の維持よりデータの保護を優先します。" - -msgid "MySQL with Galera" -msgstr "Galera を用いた MySQL" - -msgid "NODE" -msgstr "NODE" - -msgid "NODE_IP" -msgstr "NODE_IP" - -msgid "NODE_NAME" -msgstr "NODE_NAME" - -msgid "Network components, such as switches and routers" -msgstr "スイッチやルーターなどのネットワークの構成要素" - -msgid "Network controller cluster stack" -msgstr "ネットワークコントローラーのクラスタースタック" - -msgid "Neutron DHCP agent" -msgstr "Neutron DHCP エージェント" - -msgid "Neutron L2 agent" -msgstr "Neutron L2 エージェント" - -msgid "Neutron L3 agent" -msgstr "neutron L3 エージェント" - -msgid "Neutron LBaaS agent" -msgstr "Neutron LBaaS エージェント" - -msgid "Neutron metadata agent" -msgstr "Neutron メタデータエージェント" - -msgid "" -"Neutron metadata agent allows Compute API metadata to be reachable by VMs on " -"tenant networks. High availability for the metadata agent is achieved by " -"adopting Pacemaker." -msgstr "" -"Neutron Metadata エージェントにより Nova API Metadata がプロジェクトのネット" -"ワークにある仮想マシンによりアクセスできるようになります。Metadata エージェン" -"トの高可用性は Pacemaker の適用により実現されます。" - -msgid "" -"No other changes are necessary to your OpenStack configuration. 
If the node " -"currently hosting your RabbitMQ experiences a problem necessitating service " -"failover, your OpenStack services may experience a brief RabbitMQ " -"interruption, as they would in the event of a network hiccup, and then " -"continue to run normally." -msgstr "" -"OpenStack の設定に他の変更は必要ありません。現在 RabbitMQ を稼働しているノー" -"ドが、サービスのフェイルオーバーを必要とする問題に遭遇した場合、ネットワーク" -"の中断と通常どおりの動作継続により、OpenStack のサービスは MySQL の短時間の中" -"断に遭遇するかもしれません。" - -msgid "" -"No other changes are necessary to your OpenStack configuration. If the node " -"currently hosting your database experiences a problem necessitating service " -"failover, your OpenStack services may experience a brief MySQL interruption, " -"as they would in the event of a network hiccup, and then continue to run " -"normally." -msgstr "" -"OpenStack の設定に他の変更は必要ありません。現在データベースを稼働している" -"ノードが、サービスのフェイルオーバーを必要とする問題に遭遇した場合、ネット" -"ワークの中断と通常どおりの動作継続により、OpenStack のサービスは MySQL の短時" -"間の中断に遭遇するかもしれません。" - -msgid "" -"Occurs when a user-facing service is unavailable beyond a specified maximum " -"amount of time." -msgstr "指定された最大時間を超えて、ユーザーサービスが利用不可能になること。" - -msgid "Official manual for installing RabbitMQ on Fedora and RHEL" -msgstr "Fedora と RHEL の RabbitMQ インストールに関する公式マニュアル" - -msgid "Official manual for installing RabbitMQ on Ubuntu and Debian" -msgstr "Ubuntu と Debian の RabbitMQ インストールに関する公式マニュアル" - -msgid "Official manual for installing RabbitMQ on openSUSE" -msgstr "openSUSE の RabbitMQ インストールに関する公式マニュアル" - -msgid "On Fedora and RHEL" -msgstr "Fedora と RHEL 上では" - -msgid "On Havana:" -msgstr "Havana の場合:" - -msgid "On SLES 12:" -msgstr "SLES 12 の場合:" - -msgid "On Ubuntu and Debian" -msgstr "Ubuntu / Debian の場合" - -msgid "" -"On any host that is meant to be part of a Pacemaker cluster, you must first " -"establish cluster communications through the Corosync messaging layer. This " -"involves installing the following packages (and their dependencies, which " -"your package manager will normally install automatically):" -msgstr "" -"Pacemaker クラスターに参加させるすべてのホストにおいて、まず Corosync メッ" -"セージング層によるクラスター通信を確立する必要があります。これは、以下のパッ" -"ケージをインストールする必要があります。(また通常、パッケージ管理ソフトウェア" -"が自動的にそれらに依存するものをインストールします):" - -msgid "On node 1:" -msgstr "ノード1上:" - -msgid "On nodes 2 and 3:" -msgstr "ノード2と3上:" - -msgid "On openSUSE and SLES" -msgstr "openSUSE / SLES の場合" - -msgid "On openSUSE:" -msgstr "openSUSE の場合:" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the crm configure menu. " -"Pacemaker will then start the Block Storage API service, and its dependent " -"resources, on one of your nodes." -msgstr "" -"完了すると、crm configure メニューから commit と入力し、設定の変更をコミットします。Pacemaker は Block Storage " -"API サービスおよび依存するリソースを同じノードに起動します。" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the crm configure menu. " -"Pacemaker will then start the Ceilometer Central Agent service, and its " -"dependent resources, on one of your nodes." -msgstr "" -"完了すると、crm configure メニューから commit と入力し、設定の変更をコミットします。Pacemaker は Ceilometer 中央" -"エージェント サービスおよび依存するリソースを同じノードに起動します。" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the crm configure menu. " -"Pacemaker will then start the OpenStack Identity service, and its dependent " -"resources, on one of your nodes." 
-msgstr "" -"完了すると、crm configure メニューから commit と入力し、設定の変更をコミットします。Pacemaker は OpenStack " -"Identity サービスおよび依存するリソースを同じノードに起動します。" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the crm configure menu. " -"Pacemaker will then start the OpenStack Image API service, and its dependent " -"resources, on one of your nodes." -msgstr "" -"完了すると、crm configure メニューから commit と入力し、設定の変更をコミットします。Pacemaker は OpenStack Image " -"API サービスおよび依存するリソースを同じノードに起動します。" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the crm configure menu. " -"Pacemaker will then start the OpenStack Networking API service, and its " -"dependent resources, on one of your nodes." -msgstr "" -"完了すると、crm configure メニューから commit と入力し、設定の変更をコミットします。Pacemaker は OpenStack " -"Networking API サービスおよび依存するリソースを同じノードに起動します。" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the crm configure menu. " -"Pacemaker will then start the neutron DHCP agent service, and its dependent " -"resources, on one of your nodes." -msgstr "" -"完了すると、crm configure メニューから commit と入力し、設定の変更をコミットします。Pacemaker は Neutron DHCP " -"Agent サービスおよび依存するリソースを同じノードに起動します。" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the crm configure menu. " -"Pacemaker will then start the neutron L3 agent service, and its dependent " -"resources, on one of your nodes." -msgstr "" -"完了すると、crm configure メニューから commit と入力し、設定の変更をコミットします。Pacemaker は Neutron L3 Agent " -"サービスおよび依存するリソースを同じノードに起動します。" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the crm configure menu. " -"Pacemaker will then start the neutron metadata agent service, and its " -"dependent resources, on one of your nodes." -msgstr "" -"完了すると、crm configure メニューから commit と入力し、設定の変更をコミットします。Pacemaker は Neutron Metadata " -"Agent サービスおよび依存するリソースを同じノードに起動します。" - -msgid "" -"Once completed, you may safely return the device to the secondary role. Any " -"ongoing device synchronization will continue in the background:" -msgstr "" -"一度完了すると、デバイスを安全にセカンダリロールに戻せます。すべての動作中の" -"デバイス同期はバックエンドで継続されます。" - -msgid "" -"Once the DRBD resource is running and in the primary role (and potentially " -"still in the process of running the initial device synchronization), you may " -"proceed with creating the filesystem for RabbitMQ data. XFS is generally the " -"recommended filesystem:" -msgstr "" -"DRBD リソースが実行中になり、プライマリロールになると (まだ初期デバイス同期が" -"実行中かもしれません)、RabbitMQ データのファイルシステムの作成を進められま" -"す。XFS が一般的に推奨されるファイルシステムです。" - -msgid "" -"Once you have made these changes, you may commit the " -"updated configuration." -msgstr "" -"これらの変更を実行すると、更新した設定を commit できます。" - -msgid "OpenStack" -msgstr "OpenStack" - -msgid "OpenStack Block Storage" -msgstr "OpenStack Block Storage" - -msgid "OpenStack Compute" -msgstr "OpenStack Compute" - -msgid "OpenStack Contributors" -msgstr "OpenStack 貢献者" - -msgid "OpenStack High Availability Guide" -msgstr "OpenStack 高可用性ガイド" - -msgid "" -"OpenStack Identity is the Identity Service in OpenStack and used by many " -"services. Making the OpenStack Identity service highly available in active / " -"passive mode involves" -msgstr "" -"OpenStack Identity は OpenStack における認証サービスです。OpenStack Identity " -"Service をアクティブ / パッシブモードで高可用性にすることは、次のことが関連し" -"ます。" - -msgid "OpenStack Networking" -msgstr "OpenStack Networking" - -msgid "" -"OpenStack Networking is the network connectivity service in OpenStack. 
" -"Making the OpenStack Networking Server service highly available in active / " -"passive mode involves the following tasks:" -msgstr "" -"OpenStack Networking は、OpenStack におけるネットワーク接続性のサービスです。" -"OpenStack Networking サーバーのサービスをアクティブ/パッシブモードで高可用化" -"することには、以下の事項が関連します。" - -msgid "OpenStack controller nodes" -msgstr "OpenStack コントローラーノード" - -msgid "OpenStack controller nodes contain:" -msgstr "OpenStack コントローラーノードは次のものを含みます。" - -msgid "" -"OpenStack currently meets such availability requirements for its own " -"infrastructure services, meaning that an uptime of 99.99% is feasible for " -"the OpenStack infrastructure proper. However, OpenStack does not guarantee " -"99.99% availability for individual guest instances." -msgstr "" -"OpenStack 自体のインフラストラクチャーは、現在その可用性要件を満たせます。つ" -"まり、適切な OpenStack インフラストラクチャーの 99.99% の稼働率が実現可能で" -"す。しかしながら、OpenStack は個々のゲストインスタンスの可用性 99.99% を保証" -"できません。" - -msgid "OpenStack network nodes" -msgstr "OpenStack ネットワークノード" - -msgid "OpenStack network nodes contain:" -msgstr "OpenStack ネットワークノードは次のものを含みます。" - -msgid "Option" -msgstr "オプション" - -msgid "Organizes guide based on cloud controller and compute nodes." -msgstr "" -"クラウドコントローラーとコンピュートノードに基づいてガイドを整理しました。" - -msgid "PASSWORD" -msgstr "PASSWORD" - -msgid "PRIMARY_NODE_IP" -msgstr "PRIMARY_NODE_IP" - -msgid "Paste the following lines in this file:" -msgstr "このファイル内へ下に示した行を貼り付けます" - -msgid "Prepare MySQL for Pacemaker high availability" -msgstr "Pacemaker 高可用性のための MySQL の準備" - -msgid "Prepare RabbitMQ for Pacemaker high availability" -msgstr "Pacemaker 高可用性のための RabbitMQ の準備" - -msgid "" -"Preventing single points of failure can depend on whether or not a service " -"is stateless." -msgstr "" -"単一障害点を無くすことは、サービスがステートレスであるかどうかに依存する可能" -"性があります。" - -msgid "RabbitMQ" -msgstr "RabbitMQ" - -msgid "RabbitMQ HA cluster host:port pairs:" -msgstr "RabbitMQ HA クラスターの host:port の組:" - -msgid "RabbitMQ is packaged on both distros:" -msgstr "RabbitMQ がどちらもパッケージ化されています。" - -msgid "" -"RabbitMQ is the default AMQP server used by many OpenStack services. Making " -"the RabbitMQ service highly available involves the following steps:" -msgstr "" -"RabbitMQ が多くの OpenStack サービスにより使用される標準の AMQP サーバーで" -"す。RabbitMQ サービスを高可用性にすることは、以下の手順が関連します。" - -msgid "" -"RabbitMQ is the default AMQP server used by many OpenStack services. Making " -"the RabbitMQ service highly available involves:" -msgstr "" -"RabbitMQ が多くの OpenStack サービスにより使用される標準の AMQP サーバーで" -"す。RabbitMQ サービスを高可用性にすることは、次のことが関連します。" - -msgid "" -"Regardless of the approach, the steps outlined here must be completed on " -"only one cluster node." -msgstr "" -"その方法に関わらず、ここに概要が示された手順は一つだけのクラスターノードで完" -"了する必要があります。" - -msgid "Remove user accounts with empty user names because they cause problems:" -msgstr "" -"また、問題を引き起こす可能性があるので、空のユーザー名を用いてユーザーアカウ" -"ントを削除します。" - -msgid "Run OpenStack API and schedulers" -msgstr "OpenStack API & スケジューラーの実行" - -msgid "Run neutron DHCP agent" -msgstr "Neutron DHCP エージェントの実行" - -msgid "Run neutron L3 agent" -msgstr "Neutron L3 エージェントの実行" - -msgid "Run neutron LBaaS agent" -msgstr "neutron LBaaS エージェントの実行" - -msgid "Run neutron metadata agent" -msgstr "Neutron メタデータエージェントの実行" - -msgid "SECONDARY_NODE_IP" -msgstr "SECONDARY_NODE_IP" - -msgid "Schedulers" -msgstr "スケジューラー" - -msgid "Services currently working with HA queues:" -msgstr "現在、サービスは HA キューを用いて動作しています。" - -msgid "Set basic cluster properties" -msgstr "基本的なクラスターのプロパティの設定" - -msgid "Set up Corosync" -msgstr "Corosync のセットアップ" - -msgid "" -"Some environments may not support multicast. 
For such cases, Corosync should " -"be configured for unicast. An example fragment of the Corosync configuration " -"file is shown below:" -msgstr "" -"いくつかの環境ではマルチキャストをサポートしていないかもしれません。そのよう" -"な場合のため、Corosyncはユニキャストで設定することができます。以下に、設定" -"ファイルの例の一部を示します。" - -msgid "Start Pacemaker" -msgstr "Pacemaker の開始" - -msgid "Start mysql as root and execute the following queries:" -msgstr "rootでmysqlを起動し、下に示したクエリを実行してください:" - -msgid "Starting Corosync" -msgstr "Corosync の開始" - -msgid "Stateless vs. Stateful services" -msgstr "ステートレスサービスとステートフルサービス" - -msgid "Storage components" -msgstr "ストレージ構成要素" - -msgid "System downtime" -msgstr "システム停止時間" - -msgid "TERTIARY_NODE_IP" -msgstr "TERTIARY_NODE_IP" - -msgid "Telemetry" -msgstr "Telemetry" - -msgid "" -"Telemetry (ceilometer) is the metering and monitoring service in OpenStack. " -"The Central agent polls for resource utilization statistics for resources " -"not tied to instances or compute nodes." -msgstr "" -"Telemetry (ceilometer) は OpenStack のメータリングとモニタリングのサービスで" -"す。中央エージェントは、インスタンスやコンピュートノードに結びつけられていな" -"いリソースに対して、リソースの利用状況の統計情報を収集します。" - -msgid "" -"The service declaration for the pacemaker service may be placed in the corosync.conf " -"file directly, or in its own separate file, /etc/corosync/service." -"d/pacemaker." -msgstr "" -"pacemaker サービスに関する service 宣言" -"は、corosync.conf ファイルに直接置かれるか、別のファイ" -"ル /etc/corosync/service.d/pacemaker に置かれるかもしれ" -"ません。" - -msgid "" -"The configuration option is optional when using IPv4 and " -"required when using IPv6. This is a 32-bit value specifying the node " -"identifier delivered to the cluster membership service. When using IPv4, " -"this defaults to the 32-bit IP address to which the system is bound with the " -"ring identifier of 0. The node identifier value of zero is reserved and " -"should not be used." -msgstr "" -"はIPv4を使用する場合はオプション設定ですが、IPv6を使用する場" -"合は必須です。このオプションはクラスターメンバーシップサービス内で、ノードの" -"識別子として配信される32bitの値を指定します。IPv4を使用している場合、デフォル" -"トではシステムがリング識別子0に割り当てた32bitのIPアドレスとなります。ノード" -"識別子の値0は予約済みとなっており、使用するべきではありません。" - -msgid "" -"The directive controls the transport mechanism used. To " -"avoid the use of multicast entirely, a unicast transport parameter " -" should be specified. This requires specifying the list of " -"members in directive; this could potentially make up the " -"membership before deployment. The default is . The transport " -"type can also be set to or ." -msgstr "" -"ディレクティブは使用するトランスポートメカニズム。 マルチキャ" -"ストを完全に無効にするためには、ユニキャストトランスポートパラメータ" -"を指定しなければなりません。 ディレクティブで" -"メンバーのリストを指定する必要があります。展開する前にメンバーシップを構成す" -"ることができます。デフォルトは です。トランスポートタイプは" -" または を設定することができます。" - -msgid "" -"The specifies the IP address of one of the nodes. X is the " -"ring number." -msgstr "" -"でノードのIPアドレスのうち1つを指定します。Xはリングの番号で" -"す。" - -msgid "" -"The utility can be used to dump the Corosync cluster member " -"list:" -msgstr "" -" ユーティリティーは、Corosync クラスターメンバー一覧を出力す" -"るために使用できます。" - -msgid "" -"The API node exposes OpenStack API endpoints onto external network " -"(Internet). It must talk to the cloud controller on the management network." -msgstr "" -"API ノードは外部ネットワーク (インターネット) にある OpenStack API エンドポイ" -"ントにさらされています。管理ネットワークでクラウドコントローラーと通信する必" -"要があります。" - -msgid "" -"The Pacemaker based MySQL server requires a DRBD resource from which it " -"mounts the /var/lib/mysql directory. 
In this example, the " -"DRBD resource is simply named mysql:" -msgstr "" -"Pacemaker ベースの MySQL サーバーは /var/lib/mysql ディレ" -"クトリをマウントする DRBD リソースが必要です。この例では、DRBD リソースが単" -"に mysql という名前になっています。" - -msgid "The Pacemaker cluster stack" -msgstr "Pacemaker クラスタースタック" - -msgid "" -"The cloud controller runs on the management network and must talk to all " -"other services." -msgstr "" -"クラウドコントローラーは、管理ネットワークで動作し、他のすべてのサービスと通" -"信できる必要があります。" - -msgid "" -"The network controller sits on the management and data network, and needs to " -"be connected to the Internet if an instance will need access to the Internet." -msgstr "" -"ネットワークコントローラーは、管理ネットワークとデータネットワークに接続され" -"ます。インスタンスがインターネットにアクセスする必要がある場合、これをイン" -"ターネットに接続する必要があります。" - -msgid "" -"The neutron DHCP agent distributes IP addresses to the VMs with dnsmasq (by " -"default). High availability for the DHCP agent is achieved by adopting " -"Pacemaker." -msgstr "" -"neutron DHCP エージェントは (デフォルトで) dnsmasq を用いて仮想マシンに IP ア" -"ドレスを配布します。DHCP エージェントの高可用性は Pacemaker の適用により実現" -"されます。" - -msgid "" -"The neutron L2 agent does not need to be highly available. It has to be " -"installed on each data forwarding node and controls the virtual networking " -"drivers as Open vSwitch or Linux Bridge. One L2 agent runs per node and " -"controls its virtual interfaces. That's why it cannot be distributed and " -"highly available." -msgstr "" -"neutron L2 エージェントは、高可用化する必要がありません。各データ転送ノードに" -"インストールする必要があります。仮想ネットワークドライバーとして Open " -"vSwitch や Linux Bridge を制御します。L2 エージェントは、ノードごとに 1 つ動" -"作し、その仮想インターフェースを制御します。そのため、分散できず、高可用化で" -"きません。" - -msgid "" -"The neutron L3 agent provides L3/NAT forwarding to ensure external network " -"access for VMs on tenant networks. High availability for the L3 agent is " -"achieved by adopting Pacemaker." -msgstr "" -"Neutron L3 エージェントは、プロジェクトのネットワークにある仮想マシンが確実に" -"外部ネットワークにアクセスできるように、L3/NAT 転送機能を提供します。" - -msgid "" -"The packages are signed by GPG key 893A90DAD85F9316. You should verify the " -"fingerprint of the imported GPG key before using it." -msgstr "" -"パッケージは GPG キー 893A90DAD85F9316 を用いて署名されています。使用する前に" -"インポートした GPG キーのフィンガープリントを検証すべきです。" - -msgid "The result will look like this:" -msgstr "結果はこのようになります:" - -msgid "Then, set the following properties:" -msgstr "そして、以下のプロパティを設定します。" - -msgid "" -"There are several things to note about the recommended interface " -"configuration:" -msgstr "インターフェースの推奨設定に関する注意事項がいくつかあります。" - -msgid "" -"These are some of the more common ways to implement these high availability " -"architectures, but they are by no means the only ways to do it. The " -"important thing is to make sure that your services are redundant, and " -"available; how you achieve that is up to you. This document will cover some " -"of the more common options for highly available systems." -msgstr "" -"これらの高可用性アーキテクチャーを実現する、より一般的な方法がいくつかありま" -"すが、それらは唯一の方法ではありません。重要なことは、サービスが冗長であり、" -"利用可能であることを確実にすることです。あなたがしたいように達成します。この" -"ドキュメントは高可用性システムのより一般的なオプションをいくつか取り扱いま" -"す。" - -msgid "This configuration creates" -msgstr "この設定により、次のものが作成されます。" - -msgid "" -"This configuration creates p_keystone, a resource for " -"managing the OpenStack Identity service." 
-msgstr "" -"この設定は OpenStack Identity サービスを管理するためのリソース " -"p_keystone を作成します。" - -msgid "" -"This configuration creates p_neutron-server, a resource " -"for manage OpenStack Networking Server service" -msgstr "" -"この設定は OpenStack Networking サーバーサービスを管理するためのリソース " -"p_neutron-server を作成します。" - -msgid "This configuration creates:" -msgstr "この設定により、以下が作成されます。" - -msgid "" -"This guide describes how to install, configure, and manage OpenStack for " -"high availability." -msgstr "" -"このガイドは OpenStack を高可用にインストール、設定、管理する方法について記載" -"します。" - -msgid "" -"This guide has gone through editorial changes to follow the OpenStack " -"documentation conventions. Various smaller issues have been fixed." -msgstr "" -"OpenStack のドキュメント規則に合わせて編集しました。軽微な問題を修正しまし" -"た。" - -msgid "" -"This method does not ensure a zero downtime since it has to recreate all the " -"namespaces and virtual routers on the node." -msgstr "" -"この方法は、ノードですべての名前空間と仮想ルーターを再作成する必要があるた" -"め、無停止を保証しません。" - -msgid "" -"To be sure all data will be highly available, you should be sure that you " -"store everything in the MySQL database (which is also highly available):" -msgstr "" -"確実にすべてのデータが高可用性にするために、確実にすべてのものを (高可用性" -"な) MySQL データベースに保存すべきです。" - -msgid "To verify the cluster status:" -msgstr "クラスターの状態を確認する方法:" - -msgid "" -"Typically, an active/active installation for a stateless service would " -"maintain a redundant instance, and requests are load balanced using a " -"virtual IP address and a load balancer such as HAProxy." -msgstr "" -"一般的にステートレスサービスをアクティブ / アクティブにインストールすると、冗" -"長なインスタンスを維持することになります。リクエストは HAProxy のような仮想 " -"IP アドレスとロードバランサーを使用して負荷分散されます。" - -msgid "" -"Typically, an active/passive installation for a stateless service would " -"maintain a redundant instance that can be brought online when required. " -"Requests may be handled using a virtual IP address to facilitate return to " -"service with minimal reconfiguration required." -msgstr "" -"一般的に、ステートレスサービスのアクティブ/パッシブ構成のインストールは、必要" -"に応じてオンラインにできる冗長なインスタンスを動作させます。リクエストは、必" -"要最小限の再設定でサービスを動作させられる、仮想 IP アドレスを使用して処理さ" -"れます。" - -msgid "Update your system and install the required packages: " -msgstr "" -"システムアップデートと必須パッケージのインストールをしてください: " -"" - -msgid "Use HA queues in RabbitMQ (x-ha-policy: all):" -msgstr "RabbitMQ における HA キューの使用 (x-ha-policy: all):" - -msgid "Use durable queues in RabbitMQ:" -msgstr "RabbitMQ での永続キューの使用:" - -msgid "" -"We are building a cluster of RabbitMQ nodes to construct a RabbitMQ broker, " -"a logical grouping of several Erlang nodes." -msgstr "" -"RabbitMQブローカーを構成するため、各Erlangノードの論理グループでRabbitMQクラ" -"スタを構築します。" - -msgid "" -"We have to configure the OpenStack components to use at least two RabbitMQ " -"nodes." -msgstr "" -"2 つ以上の RabbitMQ ノードを使用するよう、OpenStack のコンポーネントを設定す" -"る必要があります。" - -msgid "You also need to create the OpenStack Identity Endpoint with this IP." -msgstr "" -"この IP を用いて OpenStack Identity エンドポイントを作成する必要があります。" - -msgid "" -"You can now add the Pacemaker configuration for Block Storage API resource. " -"Connect to the Pacemaker cluster with crm configure, and " -"add the following cluster resources:" -msgstr "" -"Block Storage API リソース用の Pacemaker 設定を追加できます。crm " -"configure を用いて Pacemaker クラスターに接続し、以下のクラスターリ" -"ソースを追加します。" - -msgid "" -"You can now add the Pacemaker configuration for OpenStack Identity resource. 
" -"Connect to the Pacemaker cluster with crm configure, and " -"add the following cluster resources:" -msgstr "" -"OpenStack Identity リソース用の Pacemaker 設定を追加できます。crm " -"configure を用いて Pacemaker クラスターに接続し、以下のクラスターリ" -"ソースを追加します。" - -msgid "" -"You can now add the Pacemaker configuration for OpenStack Networking Server " -"resource. Connect to the Pacemaker cluster with crm configure, and add the following cluster resources:" -msgstr "" -"OpenStack Networking Server リソース用の Pacemaker 設定を追加できます。" -"crm configure を用いて Pacemaker クラスターに接続し、以下" -"のクラスターリソースを追加します。" - -msgid "" -"You can now add the Pacemaker configuration for managing all network " -"resources together with a group. Connect to the Pacemaker cluster with " -"crm configure, and add the following cluster resources:" -msgstr "" -"すべてのネットワークリソースをグループと一緒に管理するための Pacemaker 設定を" -"追加できます。crm configure を用いて Pacemaker クラスター" -"に接続し、以下のクラスターリソースを追加します。" - -msgid "You can now check the Corosync connectivity with two tools." -msgstr "2 つのツールを用いて Corosync 接続性を確認できます。" - -msgid "" -"You may also use the alternate device path for the DRBD device, which may be " -"easier to remember as it includes the self-explanatory resource name:" -msgstr "" -"DRBD デバイスに対する代替デバイスパスを使用することもできます。これは自己説明" -"的なリソース名を含むため、より覚えやすいでしょう。" - -msgid "" -"You may now proceed with adding the Pacemaker configuration for neutron DHCP " -"agent resource. Connect to the Pacemaker cluster with crm " -"configure, and add the following cluster resources:" -msgstr "" -"OpenStack DHCP Agent リソース用の Pacemaker 設定を追加して、次に進むことがで" -"きます。crm configure を用いて Pacemaker クラスターに接続" -"し、以下のクラスターリソースを追加します。" - -msgid "" -"You may now proceed with adding the Pacemaker configuration for neutron L3 " -"agent resource. Connect to the Pacemaker cluster with crm " -"configure, and add the following cluster resources:" -msgstr "" -"Neutron L3 Agent リソース用の Pacemaker 設定を追加して、次に進むことができま" -"す。crm configure を用いて Pacemaker クラスターに接続し、" -"以下のクラスターリソースを追加します。" - -msgid "" -"You may now proceed with adding the Pacemaker configuration for neutron " -"metadata agent resource. Connect to the Pacemaker cluster with crm " -"configure, and add the following cluster resources:" -msgstr "" -"OpenStack Metadata Agent リソース用の Pacemaker 設定を追加して、次に進むこと" -"ができます。crm configure を用いて Pacemaker クラスターに" -"接続し、以下のクラスターリソースを追加します。" - -msgid "" -"You may then proceed with adding the Pacemaker configuration for the " -"Telemetry central agent resource. Connect to the Pacemaker cluster with " -"crm configure, and add the following cluster resources:" -msgstr "" -"Telemetry 中央エージェントリソース用の Pacemaker 設定を追加して、次に進むこと" -"ができます。crm configure を用いて Pacemaker クラスターに" -"接続し、以下のクラスターリソースを追加します。" - -msgid "You must also create the OpenStack Image API endpoint with this IP." -msgstr "" -"この IP を用いて OpenStack Image API エンドポイントを作成する必要があります。" - -msgid "" -"You must complete the next step while the MySQL database server is shut down." -msgstr "" -"MySQL データベースサーバーがシャットダウンしている間に、次の手順を完了する必" -"要があります。" - -msgid "You must create the Block Storage API endpoint with this IP." -msgstr "" -"この IP を用いて Block Storage API エンドポイントを作成する必要があります。" - -msgid "" -"You need to create the OpenStack Networking server endpoint with this IP." 
-msgstr "" -"この IP を用いて OpenStack Networking Server エンドポイントを作成する必要があ" -"ります。" - -msgid "" -"You need to edit your OpenStack Identity configuration file " -"(keystone.conf) and change the bind parameters:" -msgstr "" -"OpenStack Identity の設定ファイル (keystone.conf) を編集" -"し、バインドのパラメーターを変更する必要があります。" - -msgid "" -"You should see a status=joined entry for each of your " -"constituent cluster nodes." -msgstr "" -"組み込まれているそれぞれのクラスターノードの項目が status=joined になっていることが確認できるはずです。" - -msgid "" -"Your OpenStack services must now point their Block Storage API configuration " -"to the highly available, virtual cluster IP address — rather than a Block " -"Storage API server’s physical IP address as you normally would." -msgstr "" -"OpenStack サービスは、通常どおり Block Storage API サーバーの物理 IP アドレス" -"を指定する代わりに、Block Storage API の設定が高可用性と仮想クラスター IP ア" -"ドレスを指し示す必要があります。" - -msgid "" -"Your OpenStack services must now point their OpenStack Identity " -"configuration to the highly available, virtual cluster IP address — rather " -"than a OpenStack Identity server’s physical IP address as you normally would." -msgstr "" -"OpenStack サービスは、通常どおり OpenStack Identity サーバーの物理 IP アドレ" -"スを指定する代わりに、OpenStack Identity サーバーの設定が高可用性と仮想クラス" -"ター IP アドレスを指し示す必要があります。" - -msgid "" -"Your OpenStack services must now point their OpenStack Image API " -"configuration to the highly available, virtual cluster IP address — rather " -"than an OpenStack Image API server’s physical IP address as you normally " -"would." -msgstr "" -"OpenStack サービスは、通常どおり OpenStack Image API サーバーの物理 IP アドレ" -"スを指定する代わりに、OpenStack Image API の設定が高可用性と仮想クラスター " -"IP アドレスを指し示す必要があります。" - -msgid "" -"Your OpenStack services must now point their OpenStack Networking Server " -"configuration to the highly available, virtual cluster IP address — rather " -"than an OpenStack Networking server’s physical IP address as you normally " -"would." -msgstr "" -"OpenStack サービスは、通常どおり OpenStack Networking サーバーの物理 IP アド" -"レスを指定する代わりに、OpenStack Networking サーバーの設定が高可用性と仮想ク" -"ラスター IP アドレスを指し示す必要があります。" - -msgid "" -"a service group and order and " -"colocation constraints to ensure resources are started on " -"the correct nodes, and in the correct sequence." -msgstr "" -"リソースが適切なノードにおいて、適切な順序で起動されることを確実にする、サー" -"ビスの group, order および " -"colocation 制約。" - -msgid "ceilometer-collector" -msgstr "ceilometer-collector" - -msgid "cinder-scheduler" -msgstr "cinder-scheduler" - -msgid "cluster-glue" -msgstr "cluster-glue" - -msgid "configuring RabbitMQ to listen on that IP address," -msgstr "RabbitMQ がその IP アドレスでリッスンするよう設定します。" - -msgid "" -"configuring RabbitMQ to use a data directory residing on that DRBD device," -msgstr "" -"RabbitMQ が DRBD デバイスにあるデータディレクトリを使用するよう設定します。" - -msgid "configuring a DRBD device for use by RabbitMQ," -msgstr "RabbitMQ により使用するための DRBD デバイスを設定します。" - -msgid "corosync" -msgstr "corosync" - -msgid "crmsh" -msgstr "crmsh" - -msgid "current" -msgstr "カレント" - -msgid "heat-engine" -msgstr "heat-engine" - -msgid "l3_ha" -msgstr "l3_ha" - -msgid "" -"managing all resources, including the RabbitMQ daemon itself, with the " -"Pacemaker cluster manager." 
-msgstr "" -"RabbitMQ デーモン自身を含む、すべてのリソースを Pacemaker クラスターマネー" -"ジャーで管理します。" - -msgid "max_l3_agents_per_router" -msgstr "max_l3_agents_per_router" - -msgid "min_l3_agents_per_router" -msgstr "min_l3_agents_per_router" - -msgid "mysql PIDs" -msgstr "mysqlのPID" - -msgid "neutron-server" -msgstr "neutron-server" - -msgid "nova-conductor" -msgstr "nova-conductor" - -msgid "nova-scheduler" -msgstr "nova-scheduler" - -msgid "resource-agents" -msgstr "resource-agents" - -msgid "" -"selecting and assigning a virtual IP address (VIP) that can freely float " -"between cluster nodes," -msgstr "" -"クラスターノード間で自由に移動できる仮想 IP アドレス (VIP) を選択して割り当て" -"ます。" - -#. Put one translator per line, in the form of NAME , YEAR1, YEAR2 -msgid "translator-credits" -msgstr "Tomoyuki KATO , 2012-2014" diff --git a/doc/high-availability-guide/locale/zh_CN.po b/doc/high-availability-guide/locale/zh_CN.po deleted file mode 100644 index 534830fb..00000000 --- a/doc/high-availability-guide/locale/zh_CN.po +++ /dev/null @@ -1,2647 +0,0 @@ -# Translators: -# Alfred , 2015 -# niluyy , 2015 -# zhaochao , 2015 -# 刘俊朋 , 2015 -# 秋林 , 2015 -# 颜海峰 , 2014 -# -# OpenStack Infra , 2015. #zanata -msgid "" -msgstr "" -"Project-Id-Version: PACKAGE VERSION\n" -"POT-Creation-Date: 2015-09-15 07:40+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-08-30 01:43+0000\n" -"Last-Translator: niluyy \n" -"Language-Team: Chinese (China) (http://www.transifex.com/openstack/openstack-" -"manuals-i18n/language/zh_CN/)\n" -"Language: zh-CN\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"X-Generator: Zanata 3.7.1\n" - -msgid "" -"(Optional) Configure the clustercheck utility." -msgstr "(可选) 配置 clustercheck 工具集。" - -msgid "2" -msgstr "2" - -msgid "2012" -msgstr "2012" - -msgid "2012-01-16" -msgstr "2012-01-16" - -msgid "2012-05-24" -msgstr "2012-05-24" - -msgid "2013" -msgstr "2013" - -msgid "2014" -msgstr "2014" - -msgid "2014-04-17" -msgstr "2014-04-17" - -msgid "2014-05-16" -msgstr "2014-05-16" - -msgid "2014-10-17" -msgstr "2014-10-17" - -msgid "2015-04-30" -msgstr "2015-04-30" - -msgid "Memcached service" -msgstr "Memcached 服务" - -msgid "" -"crm configure supports batch input, so you may copy and " -"paste the above into your live Pacemaker configuration, and then make " -"changes as required." -msgstr "" -"crm configure 支持批量输入,因此可以拷贝粘贴上面到现有的 " -"Pacemaker 配置中,然后根据需要再作修改。" - -msgid "" -"crm configure supports batch input, so you may copy and " -"paste the above into your live Pacemaker configuration, and then make " -"changes as required. For example, you may enter edit p_ip_glance-" -"api from the crm configure menu and edit the " -"resource to match your preferred virtual IP address." -msgstr "" -"crm configure 支持批量输入,因此可以拷贝粘贴上面到现有的 " -"Pacemaker 配置中,然后根据需要再作修改。例如,可以从 crm configure 菜单中进入 edit p_ip_glance-api,编辑资源以匹配可" -"供使用的虚拟IP地址。" - -msgid "" -"crm configure supports batch input, so you may copy and " -"paste the above into your live pacemaker configuration, and then make " -"changes as required." -msgstr "" -"crm configure 支持批量输入,因此可以拷贝粘贴上面到现有的 " -"Pacemaker 配置中,然后根据需要再作修改。" - -msgid "" -"crm configure supports batch input, so you may copy and " -"paste the above into your live pacemaker configuration, and then make " -"changes as required. For example, you may enter edit p_ip_cinder-" -"api from the crm configure menu and edit the " -"resource to match your preferred virtual IP address." 
-msgstr "" -"crm configure 支持批量输入,因此可以拷贝粘贴上面到现有的 " -"Pacemaker 配置中,然后根据需要再作修改。例如,可以从 crm configure 菜单中进入 edit p_ip_cinder-api,编辑资源以匹配可" -"供使用的虚拟IP地址。" - -msgid "" -"crm configure supports batch input, so you may copy and " -"paste the above into your live pacemaker configuration, and then make " -"changes as required. For example, you may enter edit p_ip_keystone from the crm configure menu and edit the " -"resource to match your preferred virtual IP address." -msgstr "" -"crm configure 支持批量输入,因此可以拷贝粘贴上面到现有的 " -"Pacemaker 配置中,然后根据需要再作修改。例如,可以从 crm configure 菜单中进入 edit p_ip_keystone,编辑资源以匹配可供" -"使用的虚拟IP地址。" - -msgid "" -"crm configure supports batch input, so you may copy and " -"paste the above into your live pacemaker configuration, and then make " -"changes as required. For example, you may enter edit p_neutron-" -"server from the crm configure menu and edit the " -"resource to match your preferred virtual IP address." -msgstr "" -"crm configure 支持批量输入,因此可以拷贝粘贴上面到现有的 " -"Pacemaker 配置中,然后根据需要再作修改。例如,可以从 crm configure 菜单中进入 edit p_ip_keystone,编辑资源以匹配可供" -"使用的虚拟IP地址" - -msgid "" -"ms_drbd_mysql, the master/slave set managing the " -"mysql DRBD resource," -msgstr "ms_drbd_mysql,管理 DRBD 设备的主/从资源," - -msgid "" -"ms_drbd_rabbitmq, the master/slave set managing the " -"rabbitmq DRBD resource," -msgstr "ms_drbd_rabbitmq,管理 DRBD 设备的主/从资源," - -msgid "" -"mysql DRBD resource configuration (/etc/drbd.d/" -"mysql.res)" -msgstr "" -"mysql DRBD 资源配置文件( /etc/drbd.d/mysql." -"res )" - -msgid "" -"p_ceilometer-agent-central, a resource for managing the " -"Ceilometer Central Agent service" -msgstr "" -"p_ceilometer-agent-central, 用来管理 Ceilometer 监控代理服" -"务的资源 " - -msgid "" -"p_cinder-api, a resource for manage Block Storage API " -"service" -msgstr "" -"p_cinder-api 资源,对 OpenStack 身份认证服务进行管理。" - -msgid "" -"p_fs_mysql, a Pacemaker managed filesystem mounted to " -"/var/lib/mysql on whatever node currently runs the " -"MySQL service," -msgstr "" -"p_fs_mysql,Pacemaker 管理的文件系统,挂载点为 /" -"var/lib/mysql,该文件系统将在运行 MySQL 服务的节点上挂载," - -msgid "" -"p_fs_rabbitmq, a Pacemaker managed filesystem mounted to " -"/var/lib/rabbitmq on whatever node currently runs the " -"RabbitMQ service," -msgstr "" -"p_fs_rabbitmq,Pacemaker 管理的文件系统,挂载点为 " -"/var/lib/rabbitmq,该文件系统将在运行 RabbitMQ 服务的节" -"点上挂载," - -msgid "" -"p_glance-api, a resource for managing OpenStack Image API " -"service" -msgstr "p_glance-api 资源,对 OpenStack 镜像服务进行管理。" - -msgid "" -"p_ip_mysql, a virtual IP address for use by MySQL " -"(192.168.42.101)," -msgstr "" -"p_ip_mysql,MySQL 服务将会使用的虚拟 IP 地址" -"(192.168.42.101)," - -msgid "" -"p_ip_rabbitmq, a virtual IP address for use by RabbitMQ " -"(192.168.42.100)," -msgstr "" -"p_ip_rabbitmq,RabbitMQ 服务将会使用的虚拟 IP 地址" -"(192.168.42.100)," - -msgid "" -"p_neutron-agent-dhcp, a resource for managing the neutron " -"DHCP Agent service." -msgstr "" -"p_neutron-agent-dhcp资源,对neutron DHCP代理程序进行管理。" - -msgid "" -"p_neutron-l3-agent, a resource for manage Neutron L3 " -"Agent service" -msgstr "" -"p_neutron-l3-agent资源,对 neutron L3 代理程序进行管理。" - -msgid "" -"p_neutron-metadata-agent, a resource for manage Neutron " -"Metadata Agent service" -msgstr "" -"p_neutron-metadata-agent资源,对 neutron metadata 代理程序" -"进行管理。" - -msgid "" -"rabbitmq DRBD resource configuration (/etc/drbd." 
-"d/rabbitmq.res)" -msgstr "" -"rabbitmq DRBD 资源配置文件( /etc/drbd.d/" -"rabbitmq.res )" - -msgid "" -"fence-agents (Fedora only; all other distributions use " -"fencing agents from cluster-glue)" -msgstr "" -"fence-agents (说明:只针对 Fedora 发行版;其它 Linux 发行" -"版都使用 cluster-glue 软件包中的 fence 资源代理)" - -msgid "" -"pacemaker (Note that the crm shell should be downloaded " -"separately.)" -msgstr "" -"pacemaker (说明:crm 命令行工具需要另外单独下载。)" - -msgid " (LSB)" -msgstr " (LSB)" - -msgid " (LSB, alternate)" -msgstr " (LSB,另一种方法)" - -msgid " (systemd)" -msgstr " (systemd)" - -msgid " (upstart)" -msgstr " (upstart)" - -msgid "" -" When starting up a cluster (all nodes down) it will hold " -"the cluster quorum until all of the nodes become online and joint the " -"cluster first time (new in Corosync 2.0)." -msgstr "" -"当启动集群(所有节点关机)时,第一次它将拥有集群quorum直到所有" -"节点在线和加入集群(Corosync 2.0新增)。 " - -msgid "" -" supports batch input, so you may copy and paste the above " -"into your live pacemaker configuration, and then make changes as required. " -"For example, you may enter edit p_ip_mysql from the " -" menu and edit the resource to match your preferred virtual " -"IP address." -msgstr "" -" 支持批量输入的配置,因此可以直接复制上述配置示例,粘贴到实际" -"的 Pacemaker 配置环境,然后根据具体情况调整配置项。例如,在 菜单中,输入 edit p_ip_mysql,可以对虚拟 IP 地址资源进行" -"编辑。" - -msgid "" -" supports batch input, so you may copy and paste the above " -"into your live pacemaker configuration, and then make changes as required. " -"For example, you may enter edit p_ip_rabbitmq from the " -" menu and edit the resource to match your preferred virtual " -"IP address." -msgstr "" -" 支持批量输入的配置,因此可以直接复制上述配置示例,粘贴到实际" -"的 Pacemaker 配置环境,然后根据具体情况调整配置项。例如,在 菜单中,输入 edit p_ip_rabbitmq,可以对虚拟 IP 地址资源" -"进行编辑。" - -msgid "" -"A crucial aspect of high availability is the elimination of single points of " -"failure (SPOFs). A SPOF is an individual piece of equipment or software " -"which will cause system downtime or data loss if it fails. In order to " -"eliminate SPOFs, check that mechanisms exist for redundancy of:" -msgstr "" -"在高可用系统中,最基本的原则是排除单点故障。所谓单点故障,是指系统中的某一单" -"独部件(硬件设备或者软件组件),当它发生故障时会导致系统停机或者数据丢失。可" -"以通过检查下列系统组成部分中是否包含冗余机制来消除单点故障:" - -msgid "" -"A stateful service is one where subsequent requests to the service depend on " -"the results of the first request. Stateful services are more difficult to " -"manage because a single action typically involves more than one request, so " -"simply providing additional instances and load balancing will not solve the " -"problem. For example, if the Horizon user interface reset itself every time " -"you went to a new page, it wouldn't be very useful. OpenStack services that " -"are stateful include the OpenStack database and message queue." -msgstr "" -"有状态服务,是指客户端发送的后续请求依赖于之前相关请求的处理结果。由于单独一" -"项操作可能涉及若干相关请求,有状态服务相对难于管理,只是通过多个实例和负载均" -"衡无法实现高可用。例如,如果每次访问 Horizon 时都是打开一个全新的页面(之前的" -"操作都消失了),对于用户来说是毫无意义的。OpenStack 中有状态服务包括 " -"OpenStack 数据库和消息队列。" - -msgid "" -"A stateless service is one that provides a response after your request, and " -"then requires no further attention. To make a stateless service highly " -"available, you need to provide redundant instances and load balance them. " -"OpenStack services that are stateless include nova-api, nova-conductor, glance-api, " -"keystone-api, neutron-api and nova-" -"scheduler." 
-msgstr "" -"无状态服务是指,当该服务对一个请求作出响应之后,不会再有任何相关操作。实现无" -"状态服务的高可用,只需要同时运行该服务的多个实例,并保证这些实例的负载均衡即" -"可。OpenStack 中无状态的服务包括: nova-api, nova-conductor, " -"glance-api, keystone-api, neutron-" -"api and nova-scheduler 。" - -msgid "" -"A typical active/active installation for a stateful service would include " -"redundant services with all instances having an identical state. For " -"example, updates to one instance of a database would also update all other " -"instances. This way a request to one instance is the same as a request to " -"any other. A load balancer manages the traffic to these systems, ensuring " -"that operational systems always handle the request." -msgstr "" -"而有状态服务的“主/主”模式高可用则是维护多个完全相同的冗余实例。例如,更新其中" -"一个数据库实例时,其它所有实例都会被更新。这样客户端发送给其中一个实例的请求" -"相当于发给了所有实例。负载调度程序管理客户端和这些实例之间的连接,确保请求发" -"送到正常运行的服务实例。" - -msgid "" -"A typical active/passive installation for a stateful service maintains a " -"replacement resource that can be brought online when required. A separate " -"application (such as Pacemaker or Corosync) monitors these services, " -"bringing the backup online as necessary." -msgstr "" -"有状态服务的“主/从”模式高可用则是维护一套额外的备份资源,当故障发生时,可以直" -"接替代失效部份继续工作。单独的应用程序(如 Pacemaker 、Corosync 等)负责监控" -"各项服务,并在发生故障时激活备份资源。" - -msgid "API node cluster stack" -msgstr "API 服务节点 HA 集群配置" - -msgid "API services" -msgstr "API 服务" - -msgid "Accidental deletion or destruction of data." -msgstr "意外发生的数据删除和数据损坏。" - -msgid "Active/Active" -msgstr "主/主" - -msgid "Active/Passive" -msgstr "主/从" - -msgid "Add Block Storage API resource to Pacemaker" -msgstr "在 Pacemaker 中添加 OpenStack 块设备存储服务资源" - -msgid "Add MySQL resources to Pacemaker" -msgstr "在 Pacemaker 中添加 MySQL 资源" - -msgid "Add OpenStack Identity resource to Pacemaker" -msgstr "在 Pacemaker 中添加 OpenStack 认证服务资源" - -msgid "Add OpenStack Image API resource to Pacemaker" -msgstr "在 Pacemaker 中添加 OpenStack 镜像服务资源" - -msgid "Add OpenStack Networking Server resource to Pacemaker" -msgstr "在 Pacemaker 中添加 OpenStack 网络服务资源" - -msgid "Add RabbitMQ resources to Pacemaker" -msgstr "在 Pacemaker 中添加 RabbitMQ 资源" - -msgid "Add neutron DHCP agent resource to Pacemaker" -msgstr "在 Pacemaker 中添加 neutron DHCP 代理程序资源" - -msgid "Add neutron L3 agent resource to Pacemaker" -msgstr "在 Pacemaker 中添加 neutron L3 代理程序资源" - -msgid "Add neutron metadata agent resource to Pacemaker" -msgstr "在 Pacemaker 中添加 neutron metadata 代理程序资源" - -msgid "Add the Telemetry central agent resource to Pacemaker" -msgstr "在 Pacemaker 中添加 Telemetry 监控中心资源" - -msgid "Adjust the configuration:" -msgstr "调整配置文件:" - -msgid "After each change of this file, you should restart HAProxy." -msgstr "每次修改配置文件之后,必须重启 HAProxy 服务。" - -msgid "" -"After the copy make sure that all files are the same, you can do this by " -"using the following command:" -msgstr "复制之后需确保所有的文件都是相同的,你能通过使用如下命令确认:" - -msgid "All OpenStack API services" -msgstr "所有 OpenStack API 服务" - -msgid "All OpenStack configuration files should refer to virtual IP addresses." -msgstr "所有 OpenStack 组件的配置中也相应使用虚拟 IP 地址。" - -msgid "" -"All OpenStack projects have an API service for controlling all the resources " -"in the Cloud. In active/active mode, the most common setup is to scale out " -"these services on at least two nodes and to use load balancing and a virtual " -"IP address (with HAProxy and " -"Keepalived in this setup)." 
-msgstr "" -"所有的OpenStack项目都包含一个API服务,这些API负责管理整个云平台的各个资源。在" -"主/主模式,通常的方案是让所有这些API服务分布到两台以上的服务节点上,通过负载" -"均衡和虚拟机IP地址(示例中使用HAProxy 和 " -"Keepalived)。" - -msgid "All OpenStack schedulers" -msgstr "所有 OpenStack 调度相关的服务" - -msgid "All other nodes can now be started using:" -msgstr "其他所有节点现在能被启动:" - -msgid "All routers will be highly available by default." -msgstr "缺省情况下,所有路由器应该高可用。" - -msgid "Applications and automatic service migration" -msgstr "应用程序以及服务自动迁移工具;" - -msgid "" -"Automatic L3 agent failover for routers via the " -"configuration option in /etc/neutron/neutron.conf." -msgstr "" -"通过/etc/neutron/neutron.conf文件中的 选" -"项配置针对路由器的L3代理失效自动恢复。" - -msgid "Begin trunk designation." -msgstr "开始主干指定。" - -msgid "" -"Besides installing the Corosync package, you must also " -"create a configuration file, stored in /etc/corosync/corosync." -"conf. Corosync can be configured to work with either multicast or " -"unicast IP addresses." -msgstr "" -"除了安装 Corosync 之外,还需要创建一个配置文件 /" -"etc/corosync/corosync.conf 。Corosync 可以使用组播或者单播 IP 地址" -"进行集群心跳通信。" - -msgid "Change the number of expected votes for a cluster to be quorate" -msgstr "改变一个集群合法的期望投票数" - -msgid "Change the number of votes assigned to a node" -msgstr "改变分配给节点的投票数" - -msgid "" -"Check that the nodes can access each other through the firewall. Depending " -"on your environment, this might mean adjusting iptables, as in:" -msgstr "" -"检查各节点之间的网络通信有没有被防火墙拦截。根据布署环境的不同,检查方法也各" -"不相同。如,某些环境中,这一步只需要对 iptables 进行配置:" - -msgid "Cloud controller cluster stack" -msgstr "OpenStack 控制服务 HA 集群配置" - -msgid "Configure Block Storage API service" -msgstr "配置 OpenStack 块设备存储服务" - -msgid "Configure DRBD" -msgstr "配置 DRBD" - -msgid "Configure MariaDB with Galera." -msgstr "配置带Galera的MariaDB。" - -msgid "Configure OpenStack Identity service" -msgstr "配置 OpenStack 身份认证服务" - -msgid "Configure OpenStack Identity to listen on the VIP address," -msgstr "配置 OpenStack 身份认证服务监听虚拟 IP 地址," - -msgid "Configure OpenStack Image service API" -msgstr "配置OpenStack镜像服务API" - -msgid "Configure OpenStack Networking server" -msgstr "配置 OpenStack 网络服务" - -msgid "Configure OpenStack Networking to listen on the virtual IP address," -msgstr "配置 OpenStack 网络服务监听虚拟 IP 地址," - -msgid "Configure OpenStack services for highly available MySQL" -msgstr "配置 OpenStack 各服务使用高可用的 MySQL 数据库" - -msgid "Configure OpenStack services for highly available RabbitMQ" -msgstr "配置 OpenStack 各服务使用高可用的 RabbitMQ 服务" - -msgid "Configure OpenStack services to use Rabbit HA queues" -msgstr "配置其它 OpenStack 服务使用高可用的 RabbitMQ 服务" - -msgid "Configure OpenStack services to use RabbitMQ" -msgstr "配置 OpenStack 各服务使用高可用的 RabbitMQ 服务" - -msgid "Configure OpenStack services to use high available OpenStack Image API" -msgstr "配置 OpenStack 各服务使用高可用的 OpenStack镜像服务" - -msgid "Configure OpenStack services to use highly available Block Storage API" -msgstr "配置 OpenStack 各服务使用高可用的 OpenStack 块设备存储服务" - -msgid "" -"Configure OpenStack services to use highly available OpenStack Networking " -"server" -msgstr "配置 OpenStack 各服务使用高可用的 OpenStack 网络服务" - -msgid "" -"Configure OpenStack services to use the highly available OpenStack Identity" -msgstr "配置 OpenStack 各服务使用高可用的 OpenStack 身份认证服务" - -msgid "Configure OpenStack services to use the virtual IP address." -msgstr "配置 OpenStack 服务使用该虚拟 IP 地址。" - -msgid "Configure OpenStack services to use this IP address." 
-msgstr "配置 OpenStack 服务使用该虚拟 IP 地址。" - -msgid "Configure Pacemaker group" -msgstr "配置 Pacemaker 资源组" - -msgid "Configure RabbitMQ" -msgstr "配置 RabbitMQ" - -msgid "Configure RabbitMQ for HA queues" -msgstr "配置 RabbitMQ 实现高可用的消息队列" - -msgid "Configure Telemetry central agent service" -msgstr "配置 Telemetry 监中心" - -msgid "Configure monitor service (used by HAProxy):" -msgstr "配置监控服务(HAProxy服务使用):" - -msgid "Configure the OpenStack Image service to listen on the VIP address." -msgstr "配置OpenStack镜像服务监听虚拟IP地址。" - -msgid "Configure the VIP" -msgstr "配置VIP" - -msgid "Configuring Block Storage to listen on the VIP address" -msgstr "配置块存储监听于VIP地址" - -msgid "Configuring MySQL to listen on that IP address" -msgstr "配置 MySQL 监听那个 IP 地址" - -msgid "Configuring MySQL to use a data directory residing on that DRBD device" -msgstr "配置MySQL使用位于DRBD设备上的数据目录" - -msgid "Configuring OpenStack services to use this IP address" -msgstr "使用该 IP 地址配置 OpenStack 服务" - -msgid "Configuring a DRBD device for use by MySQL" -msgstr "配置被MySQL使用的DRBD设备" - -msgid "" -"Connect an additional quorum device to allow small clusters remain quorate " -"during node outages" -msgstr "在节点停止服务期间,连接一个额外的quorum设备来使小集群仍然处于合法状态" - -msgid "Conversion to DocBook." -msgstr "转换为DocBook格式。" - -msgid "" -"Copy this file to all other databases servers and change the value of " -"wsrep_cluster_address and wsrep_node_name accordingly." -msgstr "" -"将此文件拷贝到所有的其他数据库服务器上并相应的修改" -"wsrep_cluster_addresswsrep_node_name" -"的值。" - -msgid "Copyright details are filled in by the template." -msgstr "版权信息来自于模板" - -msgid "Corosync configuration file (corosync.conf)" -msgstr "Corosync 配置文件(corosync.conf)" - -msgid "" -"Corosync configuration file fragment (corosync.conf)" -msgstr "" -"Corosync 配置文件片断:\n" -"( corosync.conf )" - -msgid "" -"Corosync is started as a regular system service. Depending on your " -"distribution, it may ship with an LSB init script, an upstart job, or a " -"systemd unit file. Either way, the service is usually named corosync:" -msgstr "" -"Corosync 启动方法和普通的系统服务没有区别,根据 Linux 发行版的不同,可能是 " -"LSB init 脚本、upstart 任务、systemd 服务。不过习惯上,都会统一使用 " -"corosync 这一名称:" - -msgid "Create a file system" -msgstr "创建文件系统" - -msgid "" -"Create file /etc/xinetd.d/galera-monitor with the " -"following contents:" -msgstr "" -"使用以下内容创建/etc/xinetd.d/galera-monitor 文件:" - -msgid "" -"Create file etc/sysconfig/clustercheck with the " -"following contents:" -msgstr "使用以下内容创建文件etc/sysconfig/clustercheck :" - -msgid "Create the /etc/mysql/conf.d/wsrep.cnf file." -msgstr "创建/etc/mysql/conf.d/wsrep.cnf文件" - -msgid "" -"Create the Galera configuration file /etc/my.cnf.d/galera.cnf with the following contents:" -msgstr "" -"使用以下内容创建Galera配置文件/etc/my.cnf.d/galera.cnf " -":" - -msgid "" -"Create the database user required by clustercheck:" -msgstr "创建clustercheck需要的数据库用户:" - -msgid "" -"Creates the /dev/drbd1 device node, attaches the DRBD " -"device to its backing store, and connects the DRBD node to its peer. Must be " -"completed on both nodes." -msgstr "" -"创建 /dev/drbd1 设备文件,将指定的后端存储设备附加到该 " -"DRBD 资源,同时建立所有节点服务器之间的通信连接。两台节点服务器上都必须完成该" -"操作。" - -msgid "" -"Creates the /dev/drbd0 device node, attaches the DRBD " -"device to its backing store, and connects the DRBD node to its peer. Must be " -"completed on both nodes." 
-msgstr "" -"创建 /dev/drbd0 设备文件,将指定的后端存储设备附加到该 " -"DRBD 资源,同时建立所有节点服务器之间的通信连接。两台节点服务器上都必须完成该" -"操作。" - -msgid "Creating a file system" -msgstr "创建文件系统" - -msgid "" -"Currently, there's no native feature to make the LBaaS agent highly " -"available using the default plug-in HAProxy. A common way to make HAProxy " -"highly available is to use the VRRP (Virtual Router Redundancy Protocol). " -"Unfortunately, this is not yet implemented in the LBaaS HAProxy plug-in." -msgstr "" -"目前 Neutron LBaaS 代理服务是无法通过其自带的 HAProxy 插件 实现高可用的。实" -"现 HAProxy 高可用常见的方案是使用 VRRP (Virtual Router Redundancy Protocol ," -"虚拟路由冗余协议),不过 LBaaS HAProxy 插件目前还不支持该协议。" - -msgid "Data loss" -msgstr "数据丢失" - -msgid "Database" -msgstr "数据库" - -msgid "Description" -msgstr "描述" - -msgid "Do this configuration on all services using RabbitMQ:" -msgstr "对所有使用 RabbitMQ 的组件进行配置:" - -msgid "" -"Due to limitations of a polling model, a single instance of this agent can " -"be polling a given list of meters, unless workload partitioning has been " -"configured for multiple central agents. In this setup, we install this " -"service on the API nodes also in the active / passive mode." -msgstr "" -"由于轮循模型的限制,代理的单个实例只能轮循给定的测量值列表,除非通过配置多个" -"中心代理分担负载。在示例中,在API节点,我们以主/备模式安装这个服务。" - -msgid "Edit /etc/ceilometer/ceilometer.conf:" -msgstr "编辑 /etc/ceilometer/ceilometer.conf:" - -msgid "Edit /etc/cinder/cinder.conf:" -msgstr "编辑 /etc/cinder/cinder.conf:" - -msgid "Edit /etc/glance/glance-api.conf:" -msgstr "编辑 /etc/glance/glance-api.conf:" - -msgid "Edit /etc/neutron/neutron.conf:" -msgstr "编辑 /etc/neutron/neutron.conf:" - -msgid "Example configuration with two hosts:" -msgstr "使用 2 个 memcached 节点的配置示例:" - -msgid "Facility services such as power, air conditioning, and fire protection" -msgstr "辅助设施,如电源、空调、防火等;" - -msgid "" -"Finally, we need to create a service group to ensure that " -"the virtual IP is linked to the API services resources:" -msgstr "" -"最后,创建一个资源组 group,将所有 API 服务资源和该虚拟 " -"IP 地址联系起来。" - -msgid "First of all, you need to download the resource agent to your system:" -msgstr "首先,下载 Pacemaker 资源代理:" - -msgid "" -"First, you must select and assign a virtual IP address (VIP) that can freely " -"float between cluster nodes." -msgstr "首先选择并绑定一个可以在各集群节点之间迁移的虚拟 IP 地址 (即 VIP )。" - -msgid "" -"For OpenStack Compute, for example, if your OpenStack Image API service IP " -"address is 192.168.42.103 as in the configuration " -"explained here, you would use the following configuration in your " -"nova.conf file:" -msgstr "" -"以 OpenStack 计算服务为例,如果 OpenStack 镜像服务的虚拟 IP 地址是 " -"192.168.42.103,那么在 OpenStack 计算服务的配置文件" -"( nova.conf )中应该使用如下配置:" - -msgid "" -"For OpenStack Image, for example, if your MySQL service IP address is " -"192.168.42.101 as in the configuration explained here, " -"you would use the following line in your OpenStack Image registry " -"configuration file (glance-registry.conf):" -msgstr "" -"以 OpenStack 镜像服务为例,如果 MySQL 数据库的虚拟 IP 地址是 " -"192.168.42.101,那么在 OpenStack 镜像服务的配置文件" -"( glance-registry.conf )中应该使用如下配置:" - -msgid "" -"For OpenStack Image, for example, if your RabbitMQ service IP address is " -"192.168.42.100 as in the configuration explained here, " -"you would use the following line in your OpenStack Image API configuration " -"file (glance-api.conf):" -msgstr "" -"以 OpenStack 镜像服务为例,如果 RabbitMQ 服务的虚拟 IP 地址是 " -"192.168.42.100,那么在 OpenStack 镜像服务的配置文件" -"( glance-api.conf )中应该使用如下配置:" - -msgid "" -"For UDPU, every node that should be a member of the membership must be " -"specified." 
-msgstr "对于 UDPU ,每台节点服务器都需要配置所属的传输组。" - -msgid "" -"For a new MySQL installation with no existing data, you may also run the " -" command:" -msgstr "如果使用全新的数据库,可以执行 命令:" - -msgid "" -"For example with OpenStack Compute, if your OpenStack Identity service IP " -"address is 192.168.42.103 as in the configuration " -"explained here, you would use the following line in your API configuration " -"file (api-paste.ini):" -msgstr "" -"以 OpenStack 计算服务为例,如果 OpenStack 身份认证服务的虚拟 IP 地址是 " -"192.168.42.103,那么在 OpenStack 计算服务的配置文件" -"( api-paste.ini )中应该使用如下配置:" - -msgid "" -"For example, you should configure OpenStack Compute for using highly " -"available OpenStack Networking server in editing nova.conf file:" -msgstr "" -"以 OpenStack 计算服务为例,在 OpenStack 计算服务的配置文件( nova." -"conf )中应该使用如下配置:" - -msgid "" -"For firewall configurations, note that Corosync communicates over UDP only, " -"and uses mcastport (for receives) and mcastport " -"- 1 (for sends)." -msgstr "" -"Corosync 通信使用 UDP 协议,端口为 mcastport (接收数据)" -"和 mcastport - 1 (发送数据)。配置防火墙时需要打开这两个" -"端口。" - -msgid "" -"For the next step create a back-up file of the debian.cnf file in /etc/mysql on all database servers. " -"Should something go wrong just copy the back-up file back." -msgstr "" -"下一步执行之前创建所有数据库服务器的debian.cnf文件的备份" -"文件,该文件位于/etc/mysql。当有什么错误发生时,仅需拷贝" -"备份文件回来。" - -msgid "From the debian.cnf get the database password:" -msgstr "从debian.cnf中获取数据库密码:" - -msgid "Get a list of nodes known to the quorum service" -msgstr "获取到quorum 服务的已知节点列表" - -msgid "HA using active/active" -msgstr "主/主模式高可用集群" - -msgid "HA using active/passive" -msgstr "主/从模式高可用集群" - -msgid "" -"HAProxy is a very fast and reliable solution offering high availability, " -"load balancing, and proxying for TCP and HTTP-based applications. It is " -"particularly suited for web sites crawling under very high loads while " -"needing persistence or Layer 7 processing. Supporting tens of thousands of " -"connections is clearly realistic with today’s hardware." -msgstr "" -"HAProxy 是高效而且可靠的应用程高可用、负载均衡以及代理解决方案,适用于所有适" -"用于基于 TCP 和 HTTP 通信的应用程序,特别是那些在极高负载下仍要保持良好运行同" -"时还必须支持会话保持以及其它 7 层处理操作的 Web 站点。在当前流行的硬件配置条" -"件下,HAProxy 可以轻易地支持同时数十万计的并发连接。" - -msgid "HAProxy nodes" -msgstr "HAProxy 节点服务器" - -msgid "Here is an example for the HAProxy configuration file:" -msgstr "下面是HAProxy配置文件的示例:" - -msgid "High Availability systems seek to minimize two things:" -msgstr "实现系统高可用是为了减少以下 2 种异常情况:" - -msgid "" -"High-availability systems typically achieve an uptime percentage of 99.99% " -"or more, which roughly equates to less than an hour of cumulative downtime " -"per year. In order to achieve this, high availability systems should keep " -"recovery times after a failure to about one to two minutes, sometimes " -"significantly less." 
-msgstr "" -"通常,高可用系统能够保证 99.99% 的在线时间,相当于一年之中发生系统故障的累积" -"时间不超过 1 个小时。要达到这一目标,高可用系统应将故障恢复时间控制在 1 ~ 2 " -"分钟之内甚至更短。" - -msgid "Highly available Block Storage API" -msgstr "高可用 OpenStack 块设备存储服务" - -msgid "Highly available MySQL" -msgstr "高可用的MySQL" - -msgid "Highly available OpenStack Identity" -msgstr "高可用 OpenStack 身份认证服务" - -msgid "Highly available OpenStack Image API" -msgstr "高可用 OpenStack 镜像 API 服务" - -msgid "Highly available OpenStack Networking server" -msgstr "高可用 OpenStack 网络服务" - -msgid "Highly available RabbitMQ" -msgstr "高可用的 RabbitMQ" - -msgid "Highly available Telemetry central agent" -msgstr "高可用 Telemetry 监控代理" - -msgid "Highly available neutron DHCP agent" -msgstr "高可用 neutron DHCP 代理程序" - -msgid "Highly available neutron L3 agent" -msgstr "高可用 neutron L3 代理程序" - -msgid "Highly available neutron metadata agent" -msgstr "高可用 neutron metadata 代理程序" - -msgid "How frequently to retry connecting with RabbitMQ:" -msgstr "重新尝试连接 RabbitMQ 服务的时间间隔:" - -msgid "How long to back-off for between retries when connecting to RabbitMQ:" -msgstr "每次重新尝试连接 RabbitMQ 服务应后延多长时间:" - -msgid "" -"If HAProxy is used to load-balance client access to MariaDB, as described in " -"the HAProxy section of this document, " -"you can use the clustercheck utility to improve " -"health checks." -msgstr "" -"如本文档HAProxy章节所描述, 如果" -"HAProxy用来负载均衡MariaDB的访问,您可以用clustercheck工具集改善心跳检测。" - -msgid "" -"If the is set to yes, the broadcast address is used for " -"communication. If this option is set, should not be set." -msgstr "" -"如果将 设置为 yes ,集群心跳将通过广播实现。设置该参数时," -"不能设置 。" - -msgid "" -"If the cluster is working, you can now proceed to creating users and " -"passwords for queues." -msgstr "如果集群运行正常,就可以开始为消息队列创建用户和密码。" - -msgid "" -"If you are using Corosync version 2 on Ubuntu 14.04, remove or comment out " -"lines under the service stanza, which enables Pacemaker to start up." -msgstr "" -"如果是在 Ubuntu 14.04 系统中运行 Corosync 2,那么应该将 stanza 对应的 " -"service 配置段删除或者全部注释,以确保 Pacemaker 可以启动。" - -msgid "" -"If you are using Corosync version 2, use the utility as it " -"is a direct replacement for ." -msgstr "" -"如果使用 Corosync v2 版本,请使用 命令的替代命令 " -" 。" - -msgid "" -"If you are using both private and public IP addresses, you should create two " -"Virtual IP addresses and define your endpoint like this:" -msgstr "" -"如果要同时使用私有和公开的 IP 地址,需要创建两个虚拟 IP 地址资源,并建立类似" -"如下的服务端点:" - -msgid "" -"If you are using both private and public IP, you should create two Virtual " -"IPs and define your endpoint like this:" -msgstr "" -"如果要同时使用私有和公开的 IP 地址,需要创建两个虚拟 IP 地址资源,并建立类似" -"如下的服务端点:" - -msgid "" -"If you are using the horizon dashboard, you should edit the " -"local_settings.py file:" -msgstr "" -"如果配置了 Horizon 面板,也需要修改 Horizon 的配置文件 " -"local_settings.py : " - -msgid "" -"If you change the configuration from an old setup which did not use HA " -"queues, you should interrupt the service:" -msgstr "" -"如果是直接俢改没有启用队列镜像特性的 RabbitMQ 服务的配置,那么对服务作一次重" -"置:" - -msgid "" -"If you have mariaDB already installed you need to re-apply all the " -"permissions from the installation guide. It will purge all privileges!" -msgstr "" -"如果您已经安装了mariaDB,您需要重新申请安装指南中的所有权限。此将清理所有的权" -"限!" - -msgid "" -"In Corosync configurations using redundant networking (with more than one " -"), you must select a Redundant Ring Protocol (RRP) mode " -"other than none. active is the " -"recommended RRP mode." 
-msgstr "" -"Cororsync 可以使用冗余的心跳网络(即多个 配置),但是必须同" -"时将 RRP 模式设置为除 none之外的其它值,建议使用 " -"active 模式。" - -msgid "" -"In an active/active configuration, systems also use a backup but will manage " -"both the main and redundant systems concurrently. This way, if there is a " -"failure the user is unlikely to notice. The backup system is already online, " -"and takes on increased load while the main system is fixed and brought back " -"online." -msgstr "" -"在“主/主”模式中,服务的冗余实例和主实例会同时工作。这样主实例发生故障,不会对" -"用户产生影响,因为冗余实例一直处于在线状态,后续客户端的请求直接由冗余实例处" -"理,而主实例的故障恢复可以同步进行。" - -msgid "" -"In an active/passive configuration, systems are set up to bring additional " -"resources online to replace those that have failed. For example, OpenStack " -"would write to the main database while maintaining a disaster recovery " -"database that can be brought online in the event that the main database " -"fails." -msgstr "" -"在“主/从”模式中,当系统中的资源失效时,新的资源会被激活,替代失效部份继续提供" -"服务。例如,在 OpenStack 集群中,可以在主数据库之外维护一套灾备数据库,当主数" -"据库发生故障时,激活灾备数据库可以保证集群继续正常运行。" - -msgid "" -"In order for Pacemaker monitoring to function properly, you must ensure that " -"MySQL's database files reside on the DRBD device. If you already have an " -"existing MySQL database, the simplest approach is to just move the contents " -"of the existing /var/lib/mysql directory into the newly " -"created filesystem on the DRBD device." -msgstr "" -"要通过 Pacemaker 实现 MySQL 高可用,必须首先保证 MySQL 的数据文件使用 DRBD 存" -"储设备。如果使用已有的数据库,最简单的方法是将已有的数据库文件 /" -"var/lib/mysql 迁移到 DRBD 设备之上的文件系统中。" - -msgid "" -"In order for Pacemaker monitoring to function properly, you must ensure that " -"RabbitMQ's .erlang.cookie files are identical on all " -"nodes, regardless of whether DRBD is mounted there or not. The simplest way " -"of doing so is to take an existing .erlang.cookie from " -"one of your nodes, copying it to the RabbitMQ data directory on the other " -"node, and also copying it to the DRBD-backed filesystem." -msgstr "" -"要通过 Pacemaker 实现 RabbitMQ 高可用,必须首先保证 RabbitMQ 的文件 " -".erlang.cookie 不论在 DRBD 设备有没有挂载的情况下,都完" -"全相同。最简单的方法是将其中一台节点服务器上已经生成的 .erlang." -"cookie 文件复制到所有其它节点,同时也复制一份到 DRBD 设备上的文件" -"系统之中。" - -msgid "" -"In the /etc/mysql/my.conf file, make the following " -"changes:" -msgstr "在/etc/mysql/my.conf文件中,进行如下修改:" - -msgid "" -"In the event that a component fails and a back-up system must take on its " -"load, most high availability systems will replace the failed component as " -"quickly as possible to maintain necessary redundancy. This way time spent in " -"a degraded protection state is minimized." -msgstr "" -"当某个组件崩溃后, 备用系统必须把该组件的工作负载承接过来。绝大多数的高可用系" -"统会尽快的把崩溃的组件替换掉用以保证系统的必要冗余安全性。该处理方案中系统处" -"在保护降级状态中的时间要尽可能的少" - -msgid "" -"In versions prior to Juno, this option was called " -"glance_api_servers in the [DEFAULT] " -"section." -msgstr "" -"对于 Juno 之前的版本,该配置项对应的是 [DEFAULT] 段之下的 " -"glance_api_servers。" - -msgid "" -"Initializes DRBD metadata and writes the initial set of metadata to " -"/dev/data/rabbitmq. Must be completed on both nodes." -msgstr "" -"初始化 DRBD 元数据,并在 /dev/data/rabbitmq 上初始元数据" -"集。两台节点服务器上都必须完成该操作。" - -msgid "" -"Initializes DRBD metadata and writes the initial set of metadata to " -"/dev/data/mysql. Must be completed on both nodes." 
-msgstr "" -"初始化 DRBD 元数据,并在 /dev/data/mysql 上初始元数据集。" -"两台节点服务器上都必须完成该操作。" - -msgid "Install RabbitMQ" -msgstr "安装 RabbitMQ" - -msgid "Install packages" -msgstr "安装软件包" - -msgid "Installing Galera through a MySQL version patched for wsrep:" -msgstr "为已经加上 wresp 补丁的 MySQL 数据库安装 Galera :" - -msgid "Introduction to OpenStack High Availability" -msgstr "OpenStack高可用介绍" - -msgid "Make sure a sensible password is used." -msgstr "确保使用一个便于理解的密码。" - -msgid "" -"Make sure you have SSH root access on the other servers. From the primary " -"database server, copy the debian.cnf file to all other " -"servers by running the following command:" -msgstr "" -"确保您用户其它服务器上的SSH root权限。通过运行一下命令,从主数据库服务器拷贝" -"debian.cnf到其它服务器:" - -msgid "" -"Making stateful services highly available can depend on whether you choose " -"an active/passive or active/active configuration." -msgstr "实现有状态服务高可用的方案有“主/从”和“主/主” 2 种模式。" - -msgid "" -"Making the Block Storage (cinder) API service highly available in active / " -"passive mode involves:" -msgstr "使得块存储(cinder)API服务在主/被模式中高可用包括:" - -msgid "" -"Making the Telemetry central agent service highly available in active / " -"passive mode involves managing its daemon with the Pacemaker cluster manager." -msgstr "" -"Telemetry 监控中心的主/从模式高可用是通过 Pacemaker 管理其后台守护进程实现。" - -msgid "Manage network resources" -msgstr "组织网络相关资源" - -msgid "" -"Manage the OpenStack Image API daemon with the Pacemaker cluster manager." -msgstr "使用Pacemaker管理OpenStack集群的镜像API守护进程。" - -msgid "" -"Manage the OpenStack Networking API Server daemon with the Pacemaker cluster " -"manager," -msgstr "使用 Pacemaker 管理 OpenStack 网络服务," - -msgid "Managing Block Storage API daemon with the Pacemaker cluster manager" -msgstr "使用 Pacemaker 管理 OpenStack 块设备存储服务" - -msgid "Managing OpenStack Identity daemon with the Pacemaker cluster manager," -msgstr "使用 Pacemaker 管理 OpenStack 身份认证服务," - -msgid "" -"Managing all resources, including the MySQL daemon itself, with the " -"Pacemaker cluster manager" -msgstr "使用 Pacemaker 管理上述所有资源,包括 MySQL 数据库" - -msgid "MariaDB with Galera (Red Hat-based platforms)" -msgstr "Galera MariaDB(基于红帽平台)" - -msgid "" -"MariaDB with Galera provides synchronous database replication in an active-" -"active, multi-master environment. High availability for the data itself is " -"managed internally by Galera, while access availability will be managed by " -"HAProxy." -msgstr "" -"在主-主,多主环境下,Galera MariaDB提供数据库同步。数据自身的高可用由Galera内" -"部管理,同时访问高可用由HAProxy管理。" - -msgid "" -"Maximum number of network nodes to be used for the HA router. The value can " -"be larger than 2 but needs to be at least 2." -msgstr "路由器HA的网络节点最大数。值可以比2大,但是应该至少是2。" - -msgid "" -"Maximum retries with trying to connect to RabbitMQ (infinite by default):" -msgstr "连接 RabbitMQ 服务时最大的重试次数(默认没有限制):" - -msgid "Memcached" -msgstr "Memcached" - -msgid "" -"Memory caching is managed by oslo-incubator, so the way to use multiple " -"memcached servers is the same for all projects." -msgstr "" -"基于内存的缓存统一由 oslo-incubator 管理,因此对 OpenStack 服务来说,使用多" -"个 memcached 服务节点作为后端的方法完全相同。" - -msgid "" -"Minimum number of network nodes to be used for the HA router. A new router " -"creation will fail unless there are at least network nodes " -"available. The value should not be less than 2." -msgstr "" -"路由器HA的最小网络节点数。除非有最少的网络节点可用,新的路由" -"器创建失败。这个值不能比2小。" - -msgid "" -"Minor cleanup of typos, otherwise no major revisions for Icehouse release." 
-msgstr "清理一些拼写错误,相对于 Icehouse 版本没有大的改动。" - -msgid "" -"Mirrored queues in RabbitMQ improve the availability of service since it " -"will be resilient to failures." -msgstr "RabbitMQ 实现队列镜像更能提高整个集群的高可用性。" - -msgid "" -"Most OpenStack services can use Memcached to " -"store ephemeral data, such as tokens. Although Memcached does not support typical forms of redundancy, such as " -"clustering, OpenStack services can use almost any number of instances by " -"configuring multiple hostnames or IP addresses. The Memcached client implements hashing to balance objects among the " -"instances. Failure of an instance only impacts a percentage of the objects " -"and the client automatically removes it from the list of instances." -msgstr "" -"大部分OpenStack服务使用 Memcached存储瞬时数据,比" -"如令牌。虽然Memcached不支持典型形式的冗余,比如集" -"群,通过配置多个主机名或IP地址OpenStack服务可以使用几乎任意数量的实例。" -"Memcached客户端在实例之间实现对象的哈希均衡。一个" -"实例失败仅影响所有对象的一定比例,并且客户端自动从实例列表中移除它。" - -msgid "" -"Most distributions ship an example configuration file (corosync." -"conf.example) as part of the documentation bundled with the " -"Corosync package. An example Corosync configuration file " -"is shown below:" -msgstr "" -"大多数 Linux 发行版都会中在 Corosync 软件包中附带一份配置" -"示例(corosync.conf.example)。Corosync示例配置文件如下:" - -msgid "" -"Most high availability systems guarantee protection against system downtime " -"and data loss only in the event of a single failure. However, they are also " -"expected to protect against cascading failures, where a single failure " -"deteriorates into a series of consequential failures." -msgstr "" -"大多数的高可用系统只能在发生单一故障的情况下为降低停机时间和避免数据丢失提供" -"保障。但是用户也期望高可用系统同样能够处理由单一故障演变为一系列连锁故障的情" -"况。" - -msgid "" -"Most high availability systems will fail in the event of multiple " -"independent (non-consequential) failures. In this case, most systems will " -"protect data over maintaining availability." -msgstr "" -"大多数高可用系统都无法应对发生一连串不相关故障的情况,此时保护数据优先于保证" -"系统的高可用性。" - -msgid "" -"MySQL is the default database server used by many OpenStack services. Making " -"the MySQL service highly available involves:" -msgstr "" -"MySQL是许多OpenStack服务所使用的默认数据库服务。确保MySQL服务高可用涉及到:" - -msgid "MySQL with Galera" -msgstr "MySQL 和 Galera" - -msgid "" -"MySQL with Galera is by no means the only way to achieve database HA. " -"MariaDB Galera Cluster (https://" -"mariadb.org/) and Percona XtraDB Cluster (http://www.percona.com/) also work with Galera. " -"You also have the option to use PostgreSQL, which has its own replication, " -"or another database HA option." -msgstr "" -"MySQL+Galera绝对不是唯一种实现数据库高可用的方案。MariaDB Galera 集群 (https://mariadb.org/) 和Percona " -"XtraDB集群 (http://www.percona." -"com/) 都可以和Galera一起使用的。除此之外,你可以使用PostgreSQL," -"PostgreSQL有自己的数据同步方案,或者使用其他的数据库高可用方案" - -msgid "NODE" -msgstr "NODE" - -msgid "NODE_IP" -msgstr "NODE_IP" - -msgid "NODE_NAME" -msgstr "NODE_NAME" - -msgid "Network components, such as switches and routers" -msgstr "网络设备,如交换机、路由器;" - -msgid "Network controller cluster stack" -msgstr "网络控制节点 HA 集群配置" - -msgid "Neutron DHCP agent" -msgstr "Neutron DHCP 代理服务" - -msgid "Neutron L2 agent" -msgstr "Neutron L2 代理服务" - -msgid "Neutron L3 agent" -msgstr "Neutron L3 代理服务" - -msgid "Neutron LBaaS agent" -msgstr "Neutron LBaas 代理服务" - -msgid "Neutron metadata agent" -msgstr "Neutron 元数据代理服务" - -msgid "" -"Neutron metadata agent allows Compute API metadata to be reachable by VMs on " -"tenant networks. High availability for the metadata agent is achieved by " -"adopting Pacemaker." 
-msgstr "" -"Neutron metadata 代理程序的作用是让运行在租户网络上的虚拟机实例能够访问 " -"OpenStack 计算服务 API 元数据。Neutron metadata 代理程序的高可用也通过 " -"Pacemaker 实现。" - -msgid "" -"No other changes are necessary to your OpenStack configuration. If the node " -"currently hosting your RabbitMQ experiences a problem necessitating service " -"failover, your OpenStack services may experience a brief RabbitMQ " -"interruption, as they would in the event of a network hiccup, and then " -"continue to run normally." -msgstr "" -"除此之外,不需要更改其它配置。如果运行数据库服务的节点发生故障,RabbitMQ 服务" -"会自动迁移到其它节点,OpenStack 服务会经历短暂的临时 RabbitMQ 中断,和偶然发" -"生的网络中断类似,之后会继续正常运行。" - -msgid "" -"No other changes are necessary to your OpenStack configuration. If the node " -"currently hosting your database experiences a problem necessitating service " -"failover, your OpenStack services may experience a brief MySQL interruption, " -"as they would in the event of a network hiccup, and then continue to run " -"normally." -msgstr "" -"除此之外,不需要更改其它配置。如果运行数据库服务的节点发生故障,MySQL 服务会" -"自动迁移到其它节点,OpenStack 服务会经历短暂的临时 MySQL 中断,和偶然发生的网" -"络中断类似,之后会继续正常运行。" - -msgid "" -"Note that the Galera cluster configuration commands indicate two of the " -"three controllers are backup. That should be done to ensure only one node " -"serves write requests because OpenStack support for multi-node writes is not " -"production-ready yet." -msgstr "" -"注意,Galera集群配置命令表明3个控制点中的2个节点是备份。因为OpenStack支持多节" -"点写还没有准备好生产使用,应该保证只有一个节点服务器处理写请求。" - -msgid "" -"Occurs when a user-facing service is unavailable beyond a specified maximum " -"amount of time." -msgstr "面向客户的服务无法正常工作的时间超出服务承诺的上限。" - -msgid "Official manual for installing RabbitMQ on Fedora and RHEL" -msgstr "在 Fedora 和 RHEL 发行版上安装 RabbitMQ 的官方文档" - -msgid "Official manual for installing RabbitMQ on Ubuntu and Debian" -msgstr "在 Ubuntu 和 Debian 发行版上安装 RabbitMQ 的官方文档" - -msgid "Official manual for installing RabbitMQ on openSUSE" -msgstr "在 openSUSE 发行版上安装 RabbitMQ 的官方文档" - -msgid "On Fedora and RHEL" -msgstr "对于 Fedora 和 RHEL 发行版" - -msgid "On Havana:" -msgstr "对于 Havana 版本:" - -msgid "On SLES 12:" -msgstr "在SLES 12中:" - -msgid "On Ubuntu and Debian" -msgstr "对于 Ubuntu 和 Debian 发行版" - -msgid "" -"On any host that is meant to be part of a Pacemaker cluster, you must first " -"establish cluster communications through the Corosync messaging layer. This " -"involves installing the following packages (and their dependencies, which " -"your package manager will normally install automatically):" -msgstr "" -"Pacemaker 中的节点服务器之间必须通过 Corosync 建立集群通信,需要安装以下软件" -"包(以及它们的依赖软件包,通常软件包管理器将自动所有依赖软件包):" - -msgid "On node 1:" -msgstr "在节点1:" - -msgid "On nodes 2 and 3:" -msgstr "在节点2和3:" - -msgid "On openSUSE and SLES" -msgstr "对 openSUSE 和 SLES 发行版" - -msgid "On openSUSE:" -msgstr "在 openSUSE 系统中:" - -msgid "" -"On the target nodes ensure the correct owner, group, and permissions of the " -".erlang.cookie file:" -msgstr "" -"在目标节点上确保.erlang.cookie文件拥有正确的拥有者、组和" -"权限:" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the crm configure menu. " -"Pacemaker will then start the Block Storage API service, and its dependent " -"resources, on one of your nodes." -msgstr "" -"配置完成后,在 crm configure 菜单下输入 commit 提交所有配置变更。随后 Pacemaker 会其中一台节点服务器上启动" -"OpenStack 块设备存储存服务(包括所有相关资源)。" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the crm configure menu. " -"Pacemaker will then start the Ceilometer Central Agent service, and its " -"dependent resources, on one of your nodes." 
-msgstr "" -"配置完成后,在 crm configure 菜单下输入 commit 提交所有配置变更。随后 Pacemaker 会其中一台节点服务器上启动 " -"Telemetry 监控中心(包括所有相关资源)。" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the crm configure menu. " -"Pacemaker will then start the OpenStack Identity service, and its dependent " -"resources, on one of your nodes." -msgstr "" -"配置完成后,在 crm configure 菜单下输入 commit 提交所有配置变更。随后 Pacemaker 会其中一台节点服务器上启动 " -"OpenStack 身份认证服务(包括所有相关资源)。" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the crm configure menu. " -"Pacemaker will then start the OpenStack Image API service, and its dependent " -"resources, on one of your nodes." -msgstr "" -"配置完成后,在 crm configure 菜单下输入 commit 提交所有配置变更。随后 Pacemaker 会其中一台节点服务器上启动" -"OpenStack 镜像服务(包括所有相关资源)。" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the crm configure menu. " -"Pacemaker will then start the OpenStack Networking API service, and its " -"dependent resources, on one of your nodes." -msgstr "" -"配置完成后,在 crm configure 菜单下输入 commit 提交所有配置变更。随后 Pacemaker 会其中一台节点服务器上启动" -"OpenStack 网络服务(包括所有相关资源)。" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the crm configure menu. " -"Pacemaker will then start the neutron DHCP agent service, and its dependent " -"resources, on one of your nodes." -msgstr "" -"配置完成后,在 crm configure 菜单下输入 commit 提交所有配置变更。随后 Pacemaker 会其中一台节点服务器上启动 neutron " -"DHCP 代理程序(包括所有相关资源)。" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the crm configure menu. " -"Pacemaker will then start the neutron L3 agent service, and its dependent " -"resources, on one of your nodes." -msgstr "" -"配置完成后,在 crm configure 菜单下输入 commit 提交所有配置变更。随后 Pacemaker 会其中一台节点服务器上启动 neutron " -"L3 代理程序(包括所有相关资源)。" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the crm configure menu. " -"Pacemaker will then start the neutron metadata agent service, and its " -"dependent resources, on one of your nodes." -msgstr "" -"配置完成后,在 crm configure 菜单下输入 commit 提交所有配置变更。随后 Pacemaker 会其中一台节点服务器上启动 neutron " -"metadata 代理程序(包括所有相关资源)。" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the menu. Pacemaker will " -"then start the MySQL service, and its dependent resources, on one of your " -"nodes." -msgstr "" -"配置完成后,在 菜单下输入 commit 提交所有" -"配置变更。随后 Pacemaker 会其中一台节点服务器上启动 MySQL 服务(包括所有相关" -"资源)。" - -msgid "" -"Once completed, commit your configuration changes by entering " -"commit from the menu. Pacemaker will " -"then start the RabbitMQ service, and its dependent resources, on one of your " -"nodes." -msgstr "" -"配置完成后,在 菜单下输入 commit 提交所有" -"配置变更。随后 Pacemaker 会其中一台节点服务器上启动 RabbitMQ 服务(包括所有相" -"关资源)。" - -msgid "" -"Once completed, you may safely return the device to the secondary role. Any " -"ongoing device synchronization will continue in the background:" -msgstr "" -"完成后,可以安全地把设备变回 “secondary” 角色。已经启动的设备同步将在后台继续" -"进行:" - -msgid "" -"Once created, the corosync.conf file (and the " -"authkey file if the option is enabled) " -"must be synchronized across all cluster nodes." 
-msgstr "" -"corosync.conf (以及 authkey ,如" -"果 启用)一旦创建,则必须在各节点服务器之间保持同步。" - -msgid "" -"Once the Corosync services have been started and you have established that " -"the cluster is communicating properly, it is safe to start pacemakerd, the Pacemaker master control process:" -msgstr "" -"Corosync 服务启动之后,一旦各节点正常建立集群通信,就可启动 pacemakerd ( Pacemaker 主进程):" - -msgid "" -"Once the DRBD resource is running and in the primary role (and potentially " -"still in the process of running the initial device synchronization), you may " -"proceed with creating the filesystem for MySQL data. XFS is generally the " -"recommended filesystem due to its journaling, efficient allocation, and " -"performance:" -msgstr "" -"当 DRBD 资源已经激活并处于 “primay” 角色(可能初始化同步正在进行,还没有完" -"成),可以开始创建文件系统。 XFS 由于拥有日志系统,分配效率高,性能好等优点," -"是建议选择的文件系统。" - -msgid "" -"Once the DRBD resource is running and in the primary role (and potentially " -"still in the process of running the initial device synchronization), you may " -"proceed with creating the filesystem for RabbitMQ data. XFS is generally the " -"recommended filesystem:" -msgstr "" -"当 DRBD 资源已经激活并处于 “primay” 角色(可能初始化同步正在进行,还没有完" -"成),可以开始创建文件系统。 XFS 由于拥有日志系统,分配效率高,性能好等优点," -"是建议选择的文件系统:" - -msgid "" -"Once the Pacemaker services have started, Pacemaker will create a default " -"empty cluster configuration with no resources. You may observe Pacemaker's " -"status with the utility:" -msgstr "" -"Pacemaker 服务启动之后,会自动建立一份空白的集群配置,不包含任何资源。可以通" -"过 工具查看 Packemaker 集群的状态:" - -msgid "" -"Once the output from clustercheck is " -" on all nodes, restart MariaDB on node 1:" -msgstr "" -"在所有节点上,一旦从clustercheck输出是" -",在节点1上重启MariaDB。" - -msgid "" -"Once you have made these changes, you may commit the " -"updated configuration." -msgstr "作完这些改变后,可以提交更新配置。" - -msgid "" -"Once your Pacemaker cluster is set up, it is recommended to set a few basic " -"cluster properties. To do so, start the shell and change " -"into the configuration menu by entering configure. " -"Alternatively, you may jump straight into the Pacemaker configuration menu " -"by typing directly from a shell prompt." -msgstr "" -"Pacemaker 启动之后,建议首先对集群基本属性进行配置。配置时,首先执行 " -" 命令,然后输入 configure 进入配置菜单。也" -"可以执行 命令直接进入 Pacemaker 配置菜单。" - -msgid "Open firewall ports used for MariaDB and Galera communications:" -msgstr "打开MariaDB和Galera通信使用的防火墙端口:" - -msgid "OpenStack" -msgstr "OpenStack" - -msgid "OpenStack Block Storage" -msgstr "OpenStack 块设备存储服务" - -msgid "OpenStack Compute" -msgstr "OpenStack 计算服务" - -msgid "OpenStack Contributors" -msgstr "OpenStack贡献者" - -msgid "OpenStack High Availability Guide" -msgstr "OpenStack高可用指南" - -msgid "" -"OpenStack Identity is the Identity Service in OpenStack and used by many " -"services. Making the OpenStack Identity service highly available in active / " -"passive mode involves" -msgstr "" -"OpenStack 身份认证服务被很多其他服务使用。实现 OpenStack 身份认证服务主/从模" -"式的高可用包括以下步骤:" - -msgid "OpenStack Networking" -msgstr "OpenStack 网络服务" - -msgid "" -"OpenStack Networking is the network connectivity service in OpenStack. 
" -"Making the OpenStack Networking Server service highly available in active / " -"passive mode involves the following tasks:" -msgstr "" -"OpenStack 网络服务为 OpenStack 集群提供网络基础服务。实现 OpenStack 网络服务" -"主/从模式的高可用包括以下步骤:" - -msgid "OpenStack controller nodes" -msgstr "OpenStack 控制节点服务器" - -msgid "OpenStack controller nodes contain:" -msgstr "OpenStack 控制节点服务器上运行以下服务:" - -msgid "" -"OpenStack currently meets such availability requirements for its own " -"infrastructure services, meaning that an uptime of 99.99% is feasible for " -"the OpenStack infrastructure proper. However, OpenStack does not guarantee " -"99.99% availability for individual guest instances." -msgstr "" -"OpenStack 的基础服务,在合理配置的情况下,能够满足上述 99.99% 在线时间的高可" -"用性要求。但是 OpenStack 不能保证单个虚拟机实例的 99.99% 在线时间。" - -msgid "OpenStack network nodes" -msgstr "OpenStack 网络节点服务器" - -msgid "OpenStack network nodes contain:" -msgstr "OpenStack 网络节点运行以下服务:" - -msgid "" -"OpenStack schedulers are used to determine how to dispatch compute, network, " -"and volume requests. The most common setup is to use RabbitMQ as a messaging " -"system. Those services are connected to the messaging back end and can scale " -"out:" -msgstr "" -"OpenStack 调度程序决定如何分派对计算资源、网络资源、存储卷源的请求。一般情况" -"下都会使用 RabbitMQ 作为消息队列服务。以下服务使用消息队列作为后端,可以分布" -"在多个节点服务器上运行:" - -msgid "Option" -msgstr "选项" - -msgid "Organizes guide based on cloud controller and compute nodes." -msgstr "本指南将介绍如何安装控制节点和计算节点" - -msgid "PASSWORD" -msgstr "密码" - -msgid "PRIMARY_NODE_IP" -msgstr "PRIMARY_NODE_IP" - -msgid "" -"Pacemaker interacts with applications through resource agents (RAs), of " -"which it supports over 70 natively. Pacemaker can also easily use third-" -"party RAs. An OpenStack high-availability configuration uses existing native " -"Pacemaker RAs (such as those managing MySQL databases or virtual IP " -"addresses), existing third-party RAs (such as for RabbitMQ), and native " -"OpenStack RAs (such as those managing the OpenStack Identity and Image " -"services)." -msgstr "" -"Pacemaker 通过资源代理程序(RAs)(默认提供了 70 多种)和应用程序进行交互,在 " -"Pacemaker 集群中应用第三方资源代理程序(RAs)也非常容易。OpenStack 高可用配置中" -"使用了 Pacemaker 自带的资源代理程序RAs(如 MySQL 数据库服务、虚拟 IP 地址" -"等)、已有的第三方资源代理程序(如 RabbitMQ 服务)以及 OpenStack 资源代理程序" -"RAs(如 OpenStack 身份认证服务、磁盘镜像服务)。" - -msgid "" -"Pacemaker requires that both nodes have different hostnames. Because of " -"that, RA scripts could require some adjustments since the Networking " -"scheduler will be aware of one node, for example a virtual router attached " -"to a single L3 node. For example, both nodes could set different hostnames " -"in the configuration files, and when the l3-agent started by Pacemaker, the " -"node's hostname will be changed to network-controller automatically. " -"Whichever node starts the l3-agent will have the same hostname." -msgstr "" -"Pacemaker 要求所有的节点服务器使用不用的主机名,而 OpenStack 网络服务调度程序" -"只会关注其中一台(例如,一个虚拟路由器只能在其中一台运行 L3 代理的服务上启" -"动),因此需要对 RA (Pacemaker 资源代理程序) 脚本进行相应修改。比如,所有的" -"节点先各自在配置文件中配置不同的主机名,当 Pacemaker 启动 l3-agent 时,自动将" -"该节点的主机名改为 network-controller,这样所有启动 l3-agent 的节点会使用相同" -"的主机名。" - -msgid "" -"Pacemaker uses an event-driven approach to cluster state processing. " -"However, certain Pacemaker actions occur at a configurable interval, " -", which defaults to 15 minutes. It is usually prudent to " -"reduce this to a shorter interval, such as 5 or 3 minutes." 
-msgstr "" -"Pacemaker 处理集群状况时使用事件驱动机制。但是某些 Pacemaker 操作只会在固定的" -"时间间隔触发。该时间间隔可以配置,,默认值是 15 分钟。针对特" -"定的集群,可以适当缩短这一间隔,如 5 分钟或者 3 分钟。" - -msgid "Paste the following lines in this file:" -msgstr "将如下内容粘贴到文件中:" - -msgid "" -"Please refer to the RabbitMQ section for " -"configuring these services with multiple messaging servers." -msgstr "" -"请参阅 RabbitMQ 部分 将 OpenStack 各组件" -"配置为可同时使用多个消息队列服务器。" - -msgid "Possible options are:" -msgstr "可用的节点服务器配置项有:" - -msgid "Prepare MySQL for Pacemaker high availability" -msgstr "MySQL 针对 Pacemaker HA 架构的前期准备" - -msgid "Prepare RabbitMQ for Pacemaker high availability" -msgstr "RabbitMQ 针对 Pacemaker HA 架构的前期准备" - -msgid "" -"Preventing single points of failure can depend on whether or not a service " -"is stateless." -msgstr "避免单点故障的方法根据该服务是否属于无状态类型而有所不同。" - -msgid "" -"Provider enables votequorum library, this is the only " -"required option." -msgstr "提供者 启用votequorum库,这是唯一必须的选项。" - -msgid "Query the quorum status" -msgstr "查询quorum状态" - -msgid "RabbitMQ" -msgstr "RabbitMQ" - -msgid "RabbitMQ HA cluster host:port pairs:" -msgstr "RabbitMQ HA 集群服务地址及端口:" - -msgid "RabbitMQ is packaged on both distros:" -msgstr "RabbitMQ 已经有可用的软件安装包:" - -msgid "" -"RabbitMQ is the default AMQP server used by many OpenStack services. Making " -"the RabbitMQ service highly available involves the following steps:" -msgstr "" -"RabbitMQ 是多数 OpenStack 服务的默认 AMQP 服务程序。实现 RabbitMQ 的高可用包" -"括以下步骤:" - -msgid "" -"RabbitMQ is the default AMQP server used by many OpenStack services. Making " -"the RabbitMQ service highly available involves:" -msgstr "" -"RabbitMQ 是多数 OpenStack 服务的默认 AMQP 服务程序。实现 RabbitMQ 的高可用包" -"括以下步骤:" - -msgid "Receive notifications of quorum state changes" -msgstr "获取quorum状态改变的通知" - -msgid "" -"Red Hat-based distributions include Galera packages in their repositories. " -"To install the most current version of the packages, run the following " -"command:" -msgstr "" -"基于Red Hat的发型版本在它们的仓库中包含Galera包。为了安装最新版本的安装包,运" -"行以下命令:" - -msgid "" -"Regardless of the approach, the steps outlined here must be completed on " -"only one cluster node." -msgstr "这里列出的这些步骤只需要在其中一个集群节点上操作一遍即可。" - -msgid "Remove user accounts with empty user names because they cause problems:" -msgstr "删除所有用户名为空的 MySQL 帐户(这些帐户会产生安全隐患):" - -msgid "" -"Replace PRIMARY_NODE_IP and " -"SECONDARY_NODE_IP with the IP addresses of your " -"primary and secondary servers." -msgstr "" -"使用您主服务器和次服务器的IP地址替换PRIMARY_NODE_IPSECONDARY_NODE_IP" - -msgid "" -"Replace PRIMARY_NODE_IP with the hostname of the " -"server. This is set for logging." 
-msgstr "" -"使用服务器的主机名替换PRIMARY_NODE_IP。这项配置用" -"于日志。" - -msgid "Run OpenStack API and schedulers" -msgstr "运行 OpenStack API 和 调度服务" - -msgid "Run neutron DHCP agent" -msgstr "运行 neutron DHCP 代理服务" - -msgid "Run neutron L3 agent" -msgstr "运行 neutron L3 代理服务" - -msgid "Run neutron LBaaS agent" -msgstr "运行 neutron LBaas 代理服务" - -msgid "Run neutron metadata agent" -msgstr "运行 neutron 元数据代理服务" - -msgid "Run the following commands on all nodes except the first one:" -msgstr "在除第一台节点之外的其它节点服务器上运行下面的命令:" - -msgid "SECONDARY_NODE_IP" -msgstr "SECONDARY_NODE_IP" - -msgid "Schedulers" -msgstr "调度程序" - -msgid "" -"Selecting and assigning a virtual IP address (VIP) that can freely float " -"between cluster nodes" -msgstr "选择并绑定一个可以在各集群节点之间迁移的虚拟 IP 地址 (即 VIP )" - -msgid "Services currently working with HA queues:" -msgstr "目前支持高可用 RabbitMQ 服务的 OpenStack 组件有:" - -msgid "Set basic cluster properties" -msgstr "设置集群基本属性" - -msgid "Set up Corosync" -msgstr "Corosync 基本配置" - -msgid "Set up Corosync with multicast" -msgstr "配置 Corosync 使用组播" - -msgid "Set up Corosync with unicast" -msgstr "配置 Corosync 使用单播" - -msgid "" -"Setting is required in 2-node Pacemaker clusters for the " -"following reason: if quorum enforcement is enabled, and one of the two nodes " -"fails, then the remaining node can not establish a majority of quorum votes " -"necessary to run services, and thus it is unable to take over any resources. " -"In this case, the appropriate workaround is to ignore loss of quorum in the " -"cluster. This should only only be done in 2-node clusters: do not set this " -"property in Pacemaker clusters with more than two nodes. Note that a two-" -"node cluster with this setting exposes a risk of split-brain because either " -"half of the cluster, or both, are able to become active in the event that " -"both nodes remain online but lose communication with one another. The " -"preferred configuration is 3 or more nodes per cluster." -msgstr "" -"对于 2 个节点的 Pacemaker 集群,集群属性 是必须配置的,因" -"为:如果强制集群满足合法节点数要求,当 其中一个节点失效时,剩下的一个节点无法" -"达到集群多数节点在线的要求,从而不能接管原来运行在失效节点上的集群资源。这种" -"情况下,解决方法只能是忽略集群合法节点数要求。但是这一属性只能用于 2 个节点的" -"集群,对于 3 个节点及以上的集群来说,是不应该配置该属性的。需要注意的是,2 个" -"节点的集群配置该属性之后,会出现脑裂( split-brain)的风险,这是因为当 2 个节" -"点都在线但是互相无法通信时,2 个节点都认为对方出现故障,从而尝试接管对方的集" -"群资源。因此建议布署 3 个节点及以上的集群。" - -msgid "" -"Setting , and to 1000 " -"instructs Pacemaker to keep a longer history of the inputs processed, and " -"errors and warnings generated, by its Policy Engine. This history is " -"typically useful in case cluster troubleshooting becomes necessary." -msgstr "" -"将 以及 设置为 1000,是" -"为了让 Pacemaker 保存更多 Policy Engine 的处理输入、错误以及警告信息。这些历" -"史记录对排除集群故障会有很大帮忙。" - -msgid "" -"Setup the repository for Ubuntu 14.04 \"trusty\" (most recent). Install the " -"software properties, the key, and the repository:" -msgstr "为 Ubuntu 14.04 \"trusty\" (最常用)设置源。安装软件属性,密钥及源:" - -msgid "" -"Some environments may not support multicast. For such cases, Corosync should " -"be configured for unicast. 
An example fragment of the Corosync configuration " -"file is shown below:" -msgstr "" -"某些环境中可能不支持组播。这时应该配置 Corosync 使用单播,下面是使用单播的 " -"Corosync 配置文件的一部分:" - -msgid "Start MariaDB cluster:" -msgstr "启动MariaDB集群:" - -msgid "Start Pacemaker" -msgstr "启动 Pacemaker" - -msgid "Start RabbitMQ on all nodes and verify the nodes are running:" -msgstr "在所有节点上启动RabbitMQ,确保所有节点都处于运行状态:" - -msgid "Start mysql as root and execute the following queries:" -msgstr "使用root启动mysql并执行以下查询:" - -msgid "Start xinetd (required by clustercheck):" -msgstr "启动xinetd(clustercheck需要):" - -msgid "Starting Corosync" -msgstr "启动Corosync" - -msgid "Stateless vs. Stateful services" -msgstr "无状态和有状态服务" - -msgid "" -"Stop all the mysql servers and start the first server with the following " -"command:" -msgstr "停止所有的mysql服务器然后使用如下命令启动第一个服务器:" - -msgid "Storage components" -msgstr "存储设备;" - -msgid "System downtime" -msgstr "系统停机" - -msgid "TERTIARY_NODE_IP" -msgstr "TERTIARY_NODE_IP" - -msgid "Telemetry" -msgstr "Telemetry" - -msgid "" -"Telemetry (ceilometer) is the metering and monitoring service in OpenStack. " -"The Central agent polls for resource utilization statistics for resources " -"not tied to instances or compute nodes." -msgstr "" -"Telemtry ( ceilometer )是 OpenStack 系统中的计量和监控服务。监控中心收集包" -"括虚拟机实例和计算节点在内各种资源的使用情况。" - -msgid "Telemetry Central agent" -msgstr "Telemetry中央代理" - -msgid "" -"The admin_bind_host option lets you use a private network " -"for the admin access." -msgstr "" -"admin_bind_host选项使您可以通过私有网络进行管理任务。" - -msgid "" -"The service declaration for the pacemaker service may be placed in the corosync.conf " -"file directly, or in its own separate file, /etc/corosync/service." -"d/pacemaker." -msgstr "" -"pacemaker 对应的 service 配置段,可以放" -"在 corosync.conf ,也可以单独作为一个配置文件 " -"/etc/corosync/service.d/pacemaker 。" - -msgid "" -"The directive controls the transport mechanism used. To " -"avoid the use of multicast entirely, a unicast transport parameter " -" should be specified. This requires specifying the list of " -"members in directive; this could potentially make up the " -"membership before deployment. The default is . The transport " -"type can also be set to or ." -msgstr "" -" 配置项决定集群通信方式。要完全禁用组播,应该配置单播传输参" -"数 。这要求将所有的节点服务器信息写入 ,也" -"就是需要在配署 HA 集群之前确定节点组成。配认配置是 。通信方" -"式类型还支持 。" - -msgid "" -"The is the network address of the interfaces to bind to. " -"The example uses two network addresses of /24 IPv4 " -"subnets." -msgstr "" -" 是心跳网卡 IP 地址对应的网络地址。示例中使用了两个子网掩码" -"为 /24 的 IPv4 网段。" - -msgid "" -"The must differ between all configured interfaces, starting " -"with 0." -msgstr "所有心跳网络的 配置不能重复,最小值为 0 。" - -msgid "" -"The utility can be used to dump the Corosync cluster member " -"list:" -msgstr " 命令可以列出 Corosync 集群的成员节点列表:" - -msgid "" -"The utility, when invoked with the option, " -"gives a summary of the health of the communication rings:" -msgstr "" -" ,执行时加上 参数,可以获取整个集群通信的" -"健康情况:" - -msgid "" -"The value specifies the time, in milliseconds, during which " -"the Corosync token is expected to be transmitted around the ring. When this " -"timeout expires, the token is declared lost, and after lost " -"tokens the non-responding processor (cluster node) is declared dead. In " -"other words, × is the maximum time a node " -"is allowed to not respond to cluster messages before being considered dead. " -"The default for is 1000 (1 second), with 4 allowed " -"retransmits. 
These defaults are intended to minimize failover times, but can " -"cause frequent \"false alarms\" and unintended failovers in case of short " -"network interruptions. The values used here are safer, albeit with slightly " -"extended failover times." -msgstr "" -" 是时间,单位为毫秒,在该配置项指定的时间内, Corosync 令牌应" -"该完成在回环网络中的传输。如果令牌传输超时就会被丢弃,而一台节点服务器连续出" -"现 令牌失效,将会被认为是无效节点。也就是说,一台节点服务器" -"最长的无响应时间不能超对 × 的乘积(单位毫" -"秒),否则会被认为是无效节点。 的默认值是 1000 (即 1 秒)," -"同时默认的重试次数为 4 。默认配置的目标是尽量缩短故障恢复时间,但是可能出现较" -"多的 “false alarm” 提醒,发生短期的网络故障时也有可能导致失效切换。本处示例中" -"的配置参数更安全一些,但是失效切换的时间会长一些。" - -msgid "" -"The API node exposes OpenStack API endpoints onto external network " -"(Internet). It must talk to the cloud controller on the management network." -msgstr "" -"API 服务节点对外(整个互联网)提供 OpenStack API 接口。它们通过管理网络和 " -"OpenStack 控制节点进行交互。" - -msgid "" -"The Galera Cluster plug-in is a multi-master Cluster based on synchronous " -"replication. It is a high availability solution, which provides high system " -"uptime, no data loss, and scalability for growth." -msgstr "" -"Galera集群插件是基于同步复制的多个主的集群。它是一个高可用解决方案,提供高系" -"统运行时间,无数据丢失和为了增长的横向扩展。" - -msgid "" -"The OpenStack Image service offers a service for discovering, registering, " -"and retrieving virtual machine images. To make the OpenStack Image API " -"service highly available in active / passive mode, you must:" -msgstr "" -"Openstack镜像服务提供一个发现、注册和获取虚拟机镜像的服务。为了使OpenStack镜" -"像API服务以主/备模式高可用。您必须:" - -msgid "" -"The OpenStack Networking service has a scheduler that lets you run multiple " -"agents across nodes. Also, the DHCP agent can be natively highly available. " -"You can configure the number of DHCP agents per network using the parameter " -" in neutron.conf. By default this is " -"equal to 1. To achieve high availability assign more than one DHCP agent per " -"network." -msgstr "" -"OpenStack 网络服务拥有一个调度程序,所有可以在多个节点同时运行各种代理程序。" -"同样,DHCP 代理服务自身就是支持高可用的。每个网络使用多少 DHCP 代理程序是可以" -"通过配置文件 neutron.conf 中的 进行配置" -"的。默认值是 1 ,要实现 DHCP 代理服务的高可用,应为每个网络设置多个 DHCP 代理" -"程序。" - -msgid "" -"The Pacemaker based MySQL server requires a DRBD resource from which it " -"mounts the /var/lib/mysql directory. In this example, the " -"DRBD resource is simply named mysql:" -msgstr "" -"基于 Pacemaker 的 MySQL 数据库需要一个 DRBD 设备,并将之挂载到 /var/" -"lib/mysql 目录。在示例中,DRBD 资源被简单命名为 mysql:" - -msgid "" -"The Pacemaker based RabbitMQ server requires a DRBD resource from which it " -"mounts the /var/lib/rabbitmq directory. In this " -"example, the DRBD resource is simply named rabbitmq:" -msgstr "" -"基于 Pacemaker 的 RabbitMQ 服务需要一个 DRBD 设备,并将之挂载到 /" -"var/lib/rabbitmq 目录。在示例中,DRBD 资源被简单命名为 " -"rabbitmq:" - -msgid "The Pacemaker cluster stack" -msgstr "Packmaker 集群" - -msgid "" -"The below query should be run on every server except the primary node. This " -"will make sure that you can restart the database again. Do not forget to add " -"the password from the debian.cnf. To do this, run:" -msgstr "" -"在除了主节点的服务器上运行以下查询。这将确保您能再次重启服务器。不要忘记从" -"debian.cnf天降密码。为了完成这个,运行:" - -msgid "" -"The choice of database is not a foregone conclusion; you are not required to " -"use MySQL. It is, however, a fairly common choice " -"in OpenStack installations, so we will cover it here." -msgstr "" -"MySQL 并不是唯一的选择,以它作为示例是因为目前已有" -"的 OpenStack 布署案例中,使用 MySQL 作为数据库比较" -"常见。" - -msgid "" -"The cloud controller runs on the management network and must talk to all " -"other services." 
-msgstr "" -"OpenStack 控制服务运行在管理网络上,可以和其它任何 OpenStack 服务进行交互。" - -msgid "" -"The cluster is fully operational with 7 nodes (each node " -"has 1 vote), quorum: 4. If list of nodes is specified as " -"the is ignored." -msgstr "" -"集群拥有7个节点(每个节点有1票)才能完整使用,quorum:4。如" -"中指定的节点列表,被忽略。" - -msgid "" -"The first step is installing the database that sits at the heart of the " -"cluster. When we talk about high availability (HA), we talk about several " -"databases (for redundancy) and a means to keep them synchronized. In this " -"case, we choose the MySQL database, along with " -"Galera plug-in for synchronous multi-master " -"replication." -msgstr "" -"第一步部署数据库,数据库是整个集群的心脏。当讨论高可用(HA)时,我们讨论几个数" -"据库(冗余)和一种保持数据库同步的方式。这种情况下,为了多个主控的同步复制,我" -"们选择Galera插件的 MySQL数据库。" - -msgid "" -"The monitor check is quite simple since it just establishes a TCP connection " -"to the API port. Comparing to the active/passive mode using " -"Corosync and resource agents, we do not check if " -"the service is actually running. That is why all OpenStack API services " -"should be monitored by another tool, for example Nagios." -msgstr "" -"监控方法比较简单,只是向 API 服务绑定的端口发起一个 TCP 连接。和主/从模式的高" -"可用方案使用 Corosync 和资源代理脚本不同,这种监控" -"方法不会检查这些服务是否在运行。因此所有的 OpenStack API 服务还需要通过工具作" -"进一步的监控,比如 Nagios 。" - -msgid "" -"The network controller sits on the management and data network, and needs to " -"be connected to the Internet if an instance will need access to the Internet." -msgstr "" -"网络控制节点运行在管理网络和数据网络中,如果虚拟机实例要连接到互联网,网络控" -"制节点也需要具备互联网连接。" - -msgid "" -"The neutron DHCP agent distributes IP addresses to the VMs with dnsmasq (by " -"default). High availability for the DHCP agent is achieved by adopting " -"Pacemaker." -msgstr "" -"Neutron DHCP 代理程序使用 dnsmasq (默认情况下)为虚拟机实例分配 IP 地址。" -"Neutron DHCP 代理程序高可用也通过 Pacemaker 实现。" - -msgid "" -"The neutron L2 agent does not need to be highly available. It has to be " -"installed on each data forwarding node and controls the virtual networking " -"drivers as Open vSwitch or Linux Bridge. One L2 agent runs per node and " -"controls its virtual interfaces. That's why it cannot be distributed and " -"highly available." -msgstr "" -"Neutron L2 代理服务不需要实现高可用。在所有提供数据转发的服务器上都要安装 " -"Neutron L2 代理程序,对诸如 Open vSwitch 、Linux Bridge 等虚拟网络驱动进行管" -"理。每台节点服务器各运行一个 L2 代理程序,负责管理该节点上的虚拟网络接口。这" -"也是 Neutron L2 代理服务无法实现多节点分布以及高可用的原因。" - -msgid "" -"The neutron L3 agent is scalable, due to the scheduler that allows " -"distribution of virtual routers across multiple nodes. The following options " -"are available to make a router highly available:" -msgstr "" -"Neutron的三层L3代理是可扩展的,取决于允许跨多个节点虚拟路由器分发的调度器。以" -"下选项用来使一个路由器高可用:" - -msgid "" -"The neutron L3 agent provides L3/NAT forwarding to ensure external network " -"access for VMs on tenant networks. High availability for the L3 agent is " -"achieved by adopting Pacemaker." -msgstr "" -"Neutron L3 代理程序负责实现 L3/NAT 转发,让运行在租户网络上的虚拟机实例能够访" -"问外部网络。Neutron L3 代理程序实现高可用也基于 Pacemaker 。" - -msgid "" -"The packages are signed by GPG key 893A90DAD85F9316. You should verify the " -"fingerprint of the imported GPG key before using it." -msgstr "" -"这些软件包都使用 GPG 密钥 893A90DAD85F9316 进行了签名,在安装之前可以先验证签" -"名。" - -msgid "The result will look like this:" -msgstr "结果将如下所示:" - -msgid "Then, set the following properties:" -msgstr "然后,设置下列属性:" - -msgid "" -"There are several things to note about the recommended interface " -"configuration:" -msgstr "在推荐的网络接口配置中有几件事需要注意:" - -msgid "" -"There is no native feature to make this service highly available. 
At this " -"time, the Active / Passive solution exists to run the neutron metadata agent " -"in failover mode with Pacemaker. See the active/passive section of this " -"guide." -msgstr "" -"Neutron 元数据代理服务自身是不支持高可用的。目前针对 Neutron L3 代理服务只有" -"主/从模式的高可用方案通过 Pacemaker 可以实现失效切" -"换。参阅本手册的 主/从模式高可用架" -"构部分。" - -msgid "" -"These are some of the more common ways to implement these high availability " -"architectures, but they are by no means the only ways to do it. The " -"important thing is to make sure that your services are redundant, and " -"available; how you achieve that is up to you. This document will cover some " -"of the more common options for highly available systems." -msgstr "" -"上面提到的是较为常见的高可用实现方案,但是并非只有这些方案可以实现系统的高可" -"用。基本原则只是保证服务冗余和可用,具体如何实现则是视需求而定的。本文档会提" -"供如何实现高可用系统的一些通用建议。" - -msgid "This configuration creates" -msgstr "这个配置创建" - -msgid "" -"This configuration creates p_ip_api, a virtual IP address " -"for use by the API node (192.168.42.103):" -msgstr "" -"该配置新建了一个 p_ip_mysql 资源,是 API 节点将会使用的虚" -"拟 IP 地址(192.168.42.103):" - -msgid "" -"This configuration creates p_keystone, a resource for " -"managing the OpenStack Identity service." -msgstr "" -"该配置增加 p_keystone 资源,对 OpenStack 身份认证服务进行" -"管理。" - -msgid "" -"This configuration creates p_neutron-server, a resource " -"for manage OpenStack Networking Server service" -msgstr "" -"该配置增加 p_neutron-server 资源,对 OpenStack 网络服务进" -"行管理。" - -msgid "This configuration creates:" -msgstr "该配置会创建:" - -msgid "" -"This guide assumes that three nodes are used to form the MariaDB Galera " -"cluster. Unless otherwise specified, all commands need to be executed on all " -"cluster nodes." -msgstr "" -"本教程假设3个节点用来搭建MariaDB Galera集群。除非明确指定,所有命令需要在所有" -"集群节点上执行。" - -msgid "" -"This guide describes how to install, configure, and manage OpenStack for " -"high availability." -msgstr "本手册将对如何实现 OpenStack 各服务的高可用进行说明。" - -msgid "" -"This guide has gone through editorial changes to follow the OpenStack " -"documentation conventions. Various smaller issues have been fixed." -msgstr "本手册根据 OpenStack 文档规范进行了修订,改正了不少细微的错误。" - -msgid "" -"This guide has various updates for the Kilo release, such as adding MariaDB, " -"updates to the MySQL information, corosync and networking updates." -msgstr "" -"本教程针对Kilo版本有多项更新,比如添加MariaDB,更新MySQL信息,corosync和网络" -"更新。" - -msgid "" -"This method does not ensure a zero downtime since it has to recreate all the " -"namespaces and virtual routers on the node." -msgstr "" -"这种高可用方案不能实现“零停机”需求,原因是neutron L3 代理程序切换时需要重新创" -"建网络命名空间和虚拟路由器。" - -msgid "" -"This might also mean configuring any NAT firewall between nodes to allow " -"direct connections. You might need to disable SELinux, or configure it to " -"allow mysqld to listen to sockets " -"at unprivileged ports." -msgstr "" -"在某些环境中,可能还需要对 NAT 防火墙进行配置,以保证节点服务器之间可以直接通" -"信。另外,可能需要禁用 SELinux,或者允许 mysqld 监听非特权端口。" - -msgid "" -"This resource uses an underlying local disk (in DRBD terminology, a backing " -"device) named /dev/data/mysql on both cluster nodes, " -"node1 and node2. Normally, this would " -"be an LVM Logical Volume specifically set aside for this purpose. The DRBD " -"meta-disk is internal, meaning DRBD-specific metadata is being stored at the " -"end of the disk device itself. The device is configured to communicate " -"between IPv4 addresses 10.0.42.100 and " -"10.0.42.254, using TCP port 7700. Once enabled, it will " -"map to a local DRBD block device with the device minor number 0, that is, " -"/dev/drbd0." 
-msgstr "" -"该资源使用了一块本地磁盘(DRBD 术语为“后端设备”, a backing device),该磁盘" -"在两台节点服务器( node1node2 )上" -"对应相同的设备文件 —— /dev/data/mysql ,一 般情况下,该" -"磁盘是一个专门为此配置的 LVM 逻辑卷。meta-disk 配置项的值是 internal,指的是 " -"DRBD 元数据保存在后端设备的结尾(即元数据和实际数据保存在同一存储设备上)。设" -"备数据同步通过 10.0.42.10010.0.42.254 完成,使用 TCP 7700 端口。当 DRBD 资源激活之后,系统中将对应生成一" -"个 DRBD 设备文件,次设备号为 0 ,设备文件是 /dev/drbd0 。" - -msgid "" -"This resource uses an underlying local disk (in DRBD terminology, a backing " -"device) named /dev/data/rabbitmq on both cluster nodes, " -"node1 and node2. Normally, this would " -"be an LVM Logical Volume specifically set aside for this purpose. The DRBD " -"meta-disk is internal, meaning DRBD-specific metadata is being stored at the " -"end of the disk device itself. The device is configured to communicate " -"between IPv4 addresses 10.0.42.100 and " -"10.0.42.254, using TCP port 7701. Once enabled, it will " -"map to a local DRBD block device with the device minor number 1, that is, " -"/dev/drbd1." -msgstr "" -"该资源使用了一块本地磁盘(DRBD 术语为“后端设备”, a backing device),该磁盘" -"在两台节点服务器( node1node2 )上" -"对应相同的设备文件 ——/dev/data/rabbitmq ,一 般情况下," -"该磁盘是一个专门为此配置的 LVM 逻辑卷。meta-disk 配置项的值是 internal,指的" -"是 DRBD 元数据保存在后端设备的结尾(即元数据和实际数据保存在同一存储设备" -"上)。设备数据同步通过 10.0.42.100 和 " -"10.0.42.254 完成,使用 TCP 7701 端口。当 DRBD 资源激活之" -"后,系统中将对应生成一个 DRBD 设备文件,次设备号为 1 ,设备文件是 " -"/dev/drbd1 。" - -msgid "" -"To be sure all data will be highly available, you should be sure that you " -"store everything in the MySQL database (which is also highly available):" -msgstr "" -"为了保证所有的数据都是高可用的,应使用 MySQL 数据库服务(同样也要保证 MySQL " -"服务是高可用的):" - -msgid "" -"To do so, stop RabbitMQ everywhere and copy the cookie from the first node " -"to the other node(s):" -msgstr "" -"因此,首先在所有节点服务器停止 RabbitMQ 服务,然后将第一台节点服务上的 " -"cookis 文件复制到其它节点:" - -msgid "" -"To ensure that all queues, except those with auto-generated names, are " -"mirrored across all running nodes it is necessary to set the policy key " -"ha-mode to all. Run the following " -"command on one of the nodes:" -msgstr "" -"为了保证除了自动生动的队列之外的其它队列能在所有节点上实现镜像,必须将 " -"RabbitMQ 策略配置项 ha-mode 设置为 all。在集群中任何一台节点服务器上执行下面的命令。" - -msgid "To install MariaDB with Galera" -msgstr "安装带Galera的MariaDB" - -msgid "" -"To use highly available and scalable API services, we need to ensure that:" -msgstr "要实现高可用和可扩展的 API 服务,需要保证:" - -msgid "To verify the cluster status:" -msgstr "检查集群状态:" - -msgid "" -"Typically, an active/active installation for a stateless service would " -"maintain a redundant instance, and requests are load balanced using a " -"virtual IP address and a load balancer such as HAProxy." -msgstr "" -"通常,无状态服务“主/主”模式的高可用会维护冗余的服务实例,同时通过虚拟 IP 地址" -"以及负载调度程序(如 HAProxy )对客户端的请求进行负载均衡。" - -msgid "" -"Typically, an active/passive installation for a stateless service would " -"maintain a redundant instance that can be brought online when required. " -"Requests may be handled using a virtual IP address to facilitate return to " -"service with minimal reconfiguration required." -msgstr "" -"通常情况下,针对无状态服务实现“主/从”模式的高可用是维护该服务的一个冗余实例," -"在必要时,这一实例会被激活。客户端的请求统一发送到一个虚拟的 IP 地址(该地址" -"指向实际的后端服务),这样当发生切换时,后端服务和客户端几乎不需要进行任何改" -"动。" - -msgid "Update your system and install the required packages: " -msgstr "升级您的系统并安装必要的软件包:" - -msgid "Use HA queues in RabbitMQ (x-ha-policy: all):" -msgstr "否使用 RabbitMMQ 的队列镜像特性( x-ha-policy: all ):" - -msgid "" -"Use Layer 3 High Availability with VRRP. The following configuration options " -"need to be set in /etc/neutron/neutron.conf to enable " -"it:" -msgstr "" -"通过VRRP,使用3层高可用。为了启动它,需要在/etc/neutron/neutron." 
-"conf 文件设置以下配置项:" - -msgid "Use durable queues in RabbitMQ:" -msgstr "是否使用持久的消息队列:" - -msgid "" -"Using the active/passive solution to run the Neutron L3 agent in failover " -"mode with Pacemaker. See the active/passive section of this guide." -msgstr "" -"借助Pacemaker,使用主/备解决方案把Neuttron L3代理" -"运行在失效恢复模式。参考本手册主/备" -"章节" - -msgid "Value to set" -msgstr "设置的值" - -msgid "" -"Verify the wsrep replication by logging in as root under mysql and running " -"the following command:" -msgstr "通过以root身份登录mysql并运行以下命令来确认wsrep复制:" - -msgid "Votequorum service configuration within Corosync" -msgstr "Votequorum服务在Corosync中配置" - -msgid "" -"We are building a cluster of RabbitMQ nodes to construct a RabbitMQ broker, " -"a logical grouping of several Erlang nodes." -msgstr "" -"将多个 RabbitMQ 节点组织成一个集群,构建一个 RabbitMQ broker 服务,即一个 " -"Erlang 节点的逻辑集合。" - -msgid "" -"We consider that we run (at least) two RabbitMQ servers and we call the " -"nodes rabbit1 and rabbit2. To build a " -"broker, we need to ensure that all nodes have the same Erlang cookie file." -msgstr "" -"示例中会布署 2 台 RabbitMQ 服务器,rabbit1 和 " -"rabbit2。要构建一个 RabbitMQ broker 服务,必须保证所有节点" -"服务器的 Erlang cookie 文件完全相同。" - -msgid "" -"We have to configure the OpenStack components to use at least two RabbitMQ " -"nodes." -msgstr "" -"现在可以配置 OpenStack 其它组件使用高可用 RabbitMQ 集群(最少使用其中 2 台节" -"点服务器)。" - -msgid "" -"We have to consider that while exchanges and bindings will survive the loss " -"of individual nodes, queues and their messages will not because a queue and " -"its contents is located on one node. If we lose this node, we also lose the " -"queue." -msgstr "" -"单个节点服务器的故障不会导致消息的交换和绑定完全不可用,但是具体一个消息队列" -"及其中的内容则相反。原因是消息队列和其中的内容只在其中一台节点上,该节点出现" -"故障无法工作时,整个消息队列就丢失了。" - -msgid "" -"With enabled, Corosync nodes mutually authenticate using a " -"128-byte shared secret stored in /etc/corosync/authkey, " -"which may be generated with the utility. When using " -", cluster communications are also encrypted." -msgstr "" -"当启用 时,Corosync 节点之间通信时会使用一个 128 位的密钥进" -"行双向认证。密钥存放在 /etc/corosync/authkey 文件中,可" -"以通过 命令生成。启用 后,集群通信数据也会" -"进行加密。" - -msgid "" -"Within the directive, it is possible to specify specific " -"information about nodes in cluster. Directive can contain only the " -" sub-directive, which specifies every node that should be a " -"member of the membership, and where non-default options are needed. Every " -"node must have at least the field filled." -msgstr "" -"在 之下可以为某一节点设置只与该节点相关的信息,这些设置项只" -"能包含在 之中,即只能对属于集群的节点服务器进行设置,而且只" -"应包括那些与默认设置不同的参数。每台服务器都必须配置 。" - -msgid "You also need to create the OpenStack Identity Endpoint with this IP." -msgstr "在 OpenStack 身份认证服务中需要为该 IP 地址创建对应的服务端点。" - -msgid "" -"You can now add the Pacemaker configuration for Block Storage API resource. " -"Connect to the Pacemaker cluster with crm configure, and " -"add the following cluster resources:" -msgstr "" -"现在可以在 Pacemaker 中填加 OpenStack 块设备存储服务相关资源。执行 " -"crm configure 命令进入 Pacemaker 配置菜单,然后加入下列集" -"群资源:" - -msgid "" -"You can now add the Pacemaker configuration for MySQL resources. Connect to " -"the Pacemaker cluster with , and add the following cluster " -"resources:" -msgstr "" -"现在可以在 Pacemaker 中填加 MySQL 相关资源。执行 命令进入 " -"Pacemaker 配置菜单,然后加入下列集群资源:" - -msgid "" -"You can now add the Pacemaker configuration for OpenStack Identity resource. 
" -"Connect to the Pacemaker cluster with crm configure, and " -"add the following cluster resources:" -msgstr "" -"现在可以在 Pacemaker 中填加 OpenStack 身份认证服务相关资源。执行 " -"crm configure 命令进入 Pacemaker 配置菜单,然后加入下列集" -"群资源:" - -msgid "" -"You can now add the Pacemaker configuration for OpenStack Networking Server " -"resource. Connect to the Pacemaker cluster with crm configure, and add the following cluster resources:" -msgstr "" -"现在可以在 Pacemaker 中填加 OpenStack 网络服务相关资源。执行 crm " -"configure 命令进入 Pacemaker 配置菜单,然后加入下列集群资源:" - -msgid "" -"You can now add the Pacemaker configuration for managing all network " -"resources together with a group. Connect to the Pacemaker cluster with " -"crm configure, and add the following cluster resources:" -msgstr "" -"创建一个资源组将所有网络服务相关资源联系起来。执行 crm configure 命令进入 Pacemaker 配置菜单,然后加入下列集群资源:" - -msgid "" -"You can now add the Pacemaker configuration for the OpenStack Image API " -"resource. Connect to the Pacemaker cluster with crm configure, and add the following cluster resources:" -msgstr "" -"现在可以在 Pacemaker 中填加 OpenStack 镜像服务相关资源。执行 crm " -"configure 命令进入 Pacemaker 配置菜单,然后加入下列集群资源:" - -msgid "You can now check the Corosync connectivity with two tools." -msgstr "使用以下两个工具检查 Corosync 连接状态。" - -msgid "" -"You may also use the alternate device path for the DRBD device, which may be " -"easier to remember as it includes the self-explanatory resource name:" -msgstr "也可以使用 DRBD 设备的另外一个名称,该名称有解释含义,更容易记忆:" - -msgid "" -"You may now proceed with adding the Pacemaker configuration for RabbitMQ " -"resources. Connect to the Pacemaker cluster with , and add " -"the following cluster resources:" -msgstr "" -"现在可以在 Pacemaker 中填加 RabbitMQ 相关资源。执行 命令进" -"入 Pacemaker 配置菜单,然后加入下列集群资源:" - -msgid "" -"You may now proceed with adding the Pacemaker configuration for neutron DHCP " -"agent resource. Connect to the Pacemaker cluster with crm " -"configure, and add the following cluster resources:" -msgstr "" -"现在可以在 Pacemaker 中填加 neutron DHCP 代理程序相关资源。执行 crm " -"configure 命令进入 Pacemaker 配置菜单,然后加入下列集群资源:" - -msgid "" -"You may now proceed with adding the Pacemaker configuration for neutron L3 " -"agent resource. Connect to the Pacemaker cluster with crm " -"configure, and add the following cluster resources:" -msgstr "" -"现在可以在 Pacemaker 中填加 neutron L3 代理程序相关资源。执行 crm " -"configure 命令进入 Pacemaker 配置菜单,然后加入下列集群资源:" - -msgid "" -"You may now proceed with adding the Pacemaker configuration for neutron " -"metadata agent resource. Connect to the Pacemaker cluster with crm " -"configure, and add the following cluster resources:" -msgstr "" -"现在可以在 Pacemaker 中填加 neutron metadata 代理程序相关资源。执行 " -"crm configure 命令进入 Pacemaker 配置菜单,然后加入下列集" -"群资源:" - -msgid "" -"You may then proceed with adding the Pacemaker configuration for the " -"Telemetry central agent resource. Connect to the Pacemaker cluster with " -"crm configure, and add the following cluster resources:" -msgstr "" -"现在可以在 Pacemaker 中填加 Telemetry 监中心相关资源。执行 crm " -"configure 命令进入 Pacemaker 配置菜单,然后加入下列集群资源:" - -msgid "You must also create the OpenStack Image API endpoint with this IP." -msgstr "在 OpenStack 身份认证服务中需要为该 IP 地址创建对应的服务端点。" - -msgid "" -"You must complete the next step while the MySQL database server is shut down." -msgstr "下面的步骤在必须关闭MySQL数据库服务器之后进行。" - -msgid "You must create the Block Storage API endpoint with this IP." -msgstr "在 OpenStack 身份认证服务中需要为该 IP 地址创建对应的服务端点。" - -msgid "" -"You need to create the OpenStack Networking server endpoint with this IP." 
-msgstr "在 OpenStack 身份认证服务中需要为该 IP 地址创建对应的服务端点。" - -msgid "" -"You need to edit your OpenStack Identity configuration file " -"(keystone.conf) and change the bind parameters:" -msgstr "" -"编辑 OpenStack 身份认证服务的配置文件( keystone.conf ),调整以下配置项:" - -msgid "" -"You should see a status=joined entry for each of your " -"constituent cluster nodes." -msgstr "status=joined标示着每一个集群节点成员。" - -msgid "" -"You use virtual IP addresses when configuring OpenStack Identity endpoints." -msgstr "为 OpenStack 身份认证中的服务端点配置虚拟 IP 地址。" - -msgid "" -"Your OpenStack services must now point their Block Storage API configuration " -"to the highly available, virtual cluster IP address — rather than a Block " -"Storage API server’s physical IP address as you normally would." -msgstr "" -"其它 OpenStack 服务也相应地使用高可用、使用虚拟 IP 地址的 OpenStack 块设备存" -"储服务,而不在使用其所在服务器的物理 IP 地址。" - -msgid "" -"Your OpenStack services must now point their MySQL configuration to the " -"highly available, virtual cluster IP addressrather than a MySQL server's " -"physical IP address as you normally would." -msgstr "" -"现在可以将各 OpenStack 服务配置文件中使用物理 IP 地址的 MySQL 访问方式,更改" -"为访问高可用、使用虚拟 IP 地址的 MySQL 服务。" - -msgid "" -"Your OpenStack services must now point their OpenStack Identity " -"configuration to the highly available, virtual cluster IP address — rather " -"than a OpenStack Identity server’s physical IP address as you normally would." -msgstr "" -"其它 OpenStack 服务也相应地使用高可用、使用虚拟 IP 地址的 OpenStack 身份认证" -"服务,而不在使用其所在服务器的物理 IP 地址。" - -msgid "" -"Your OpenStack services must now point their OpenStack Image API " -"configuration to the highly available, virtual cluster IP address — rather " -"than an OpenStack Image API server’s physical IP address as you normally " -"would." -msgstr "" -"其它 OpenStack 服务也相应地使用高可用、使用虚拟 IP 地址的 OpenStack 镜像服" -"务,而不在使用其所在服务器的物理 IP 地址。" - -msgid "" -"Your OpenStack services must now point their OpenStack Networking Server " -"configuration to the highly available, virtual cluster IP address — rather " -"than an OpenStack Networking server’s physical IP address as you normally " -"would." -msgstr "" -"其它 OpenStack 服务也相应地使用高可用、使用虚拟 IP 地址的 OpenStack 网络服" -"务,而不在使用其所在服务器的物理 IP 地址。" - -msgid "" -"Your OpenStack services must now point their RabbitMQ configuration to the " -"highly available, virtual cluster IP addressrather than a RabbitMQ server's " -"physical IP address as you normally would." -msgstr "" -"现在可以将各 OpenStack 服务配置文件中使用物理 IP 地址的 RabbitMQ 访问方式,更" -"改为访问高可用、使用虚拟 IP 地址的 RabbitMQ 服务。" - -msgid "" -"a service group and order and " -"colocation constraints to ensure resources are started on " -"the correct nodes, and in the correct sequence." -msgstr "资源组以及顺序、协同约束条件,会确保资源在正确的节点安照正确次序启动。" - -msgid "" -"a service group and order and colocation constraints to ensure resources are " -"started on the correct nodes, and in the correct sequence." 
-msgstr "资源组以及顺序、协同约束条件,会确保资源在正确的节点安照正确次序启动。" - -msgid "ceilometer-collector" -msgstr "ceilometer-collector" - -msgid "cinder-scheduler" -msgstr "cinder-scheduler" - -msgid "cluster-glue" -msgstr "cluster-glue" - -msgid "configuring RabbitMQ to listen on that IP address," -msgstr "配置 RabbitMQ 监听该 IP 地址," - -msgid "" -"configuring RabbitMQ to use a data directory residing on that DRBD device," -msgstr "配置 RabbitMQ 使用建立在 DRBD 设备之上的数据目录," - -msgid "configuring a DRBD device for use by RabbitMQ," -msgstr "为 RabbitMQ 配置一个 DRBD 设备" - -msgid "corosync" -msgstr "corosync" - -msgid "crmsh" -msgstr "crmsh" - -msgid "current" -msgstr "current" - -msgid "heat-engine" -msgstr "heat-engine" - -msgid "l3_ha" -msgstr "l3_ha" - -msgid "" -"managing all resources, including the RabbitMQ daemon itself, with the " -"Pacemaker cluster manager." -msgstr "使用 Pacemaker 管理上述所有资源,包括 RabbitMQ 守护进程本身。" - -msgid "max_l3_agents_per_router" -msgstr "max_l3_agents_per_router" - -msgid "min_l3_agents_per_router" -msgstr "min_l3_agents_per_router" - -msgid "mysql PIDs" -msgstr "mysql 进程号" - -msgid "neutron-server" -msgstr "neutron-server" - -msgid "nova-conductor" -msgstr "nova-conductor" - -msgid "nova-scheduler" -msgstr "nova-scheduler" - -msgid "resource-agents" -msgstr "resource-agents" - -msgid "" -"selecting and assigning a virtual IP address (VIP) that can freely float " -"between cluster nodes," -msgstr "选择并绑定一个可以在各集群节点之间迁移的虚拟 IP 地址 (即 VIP )," - -#. Put one translator per line, in the form of NAME , YEAR1, YEAR2 -msgid "translator-credits" -msgstr "translator-credits" diff --git a/doc/high-availability-guide/network/section_highly_available_neutron_dhcp_agent.xml b/doc/high-availability-guide/network/section_highly_available_neutron_dhcp_agent.xml deleted file mode 100644 index a51580d1..00000000 --- a/doc/high-availability-guide/network/section_highly_available_neutron_dhcp_agent.xml +++ /dev/null @@ -1,45 +0,0 @@ -
- - Highly available neutron DHCP agent - - The neutron DHCP agent distributes IP addresses to the VMs with dnsmasq (by -default). High availability for the DHCP agent is achieved by adopting -Pacemaker. - - Here is the documentation for installing neutron DHCP agent. - -
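The guide notes elsewhere that the DHCP agent can also be made natively highly available by scheduling each network onto more than one DHCP agent through the Networking scheduler. A minimal neutron.conf sketch of that approach follows; the option name is an assumption added here for illustration, since the literal option is not reproduced in this text:

    # /etc/neutron/neutron.conf (fragment; option name assumed)
    [DEFAULT]
    # Schedule every network onto two DHCP agents so that dnsmasq keeps
    # answering lease requests if one network node fails.
    dhcp_agents_per_network = 2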
- - Add neutron DHCP agent resource to Pacemaker - - First of all, you need to download the resource agent to your system: - # cd /usr/lib/ocf/resource.d/openstack -# wget https://raw.github.com/madkiss/openstack-resource-agents/master/ocf/neutron-agent-dhcp -# chmod a+rx neutron-agent-dhcp - You may now proceed with adding the Pacemaker configuration for -neutron DHCP agent resource. Connect to the Pacemaker cluster with crm -configure, and add the following cluster resources: - primitive p_neutron-dhcp-agent ocf:openstack:neutron-agent-dhcp \ - params config="/etc/neutron/neutron.conf" \ - plugin_config="/etc/neutron/dhcp_agent.ini" \ - op monitor interval="30s" timeout="30s" - This configuration creates: - - - p_neutron-dhcp-agent, a - resource for managing the neutron DHCP Agent service. - - - crm configure supports batch input, so you may copy and paste the -above into your live Pacemaker configuration, and then make changes as -required. - Once completed, commit your configuration changes by entering commit -from the crm configure menu. Pacemaker will then start the neutron DHCP -agent service, and its dependent resources, on one of your nodes. -
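Because crm configure supports batch input, the primitive above can also be loaded non-interactively and then checked with a one-shot status query. This is a minimal sketch, assuming your crmsh version accepts a here-document on standard input:

    # Feed the resource definition and the commit to crm configure in one step.
    crm configure <<'EOF'
    primitive p_neutron-dhcp-agent ocf:openstack:neutron-agent-dhcp \
      params config="/etc/neutron/neutron.conf" \
      plugin_config="/etc/neutron/dhcp_agent.ini" \
      op monitor interval="30s" timeout="30s"
    commit
    EOF
    # Confirm that Pacemaker has started the agent on one of the nodes.
    crm_mon -1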
-
diff --git a/doc/high-availability-guide/network/section_highly_available_neutron_l3_agent.xml b/doc/high-availability-guide/network/section_highly_available_neutron_l3_agent.xml deleted file mode 100644 index 8395283e..00000000 --- a/doc/high-availability-guide/network/section_highly_available_neutron_l3_agent.xml +++ /dev/null @@ -1,49 +0,0 @@ -
- - Highly available neutron L3 agent - - The neutron L3 agent provides L3/NAT forwarding to ensure external network access -for VMs on tenant networks. High availability for the L3 agent is achieved by -adopting Pacemaker. - - Here is the documentation - for installing neutron L3 agent. - -
- - Add neutron L3 agent resource to Pacemaker - - First of all, you need to download the resource agent to your system: - # cd /usr/lib/ocf/resource.d/openstack -# wget https://raw.github.com/madkiss/openstack-resource-agents/master/ocf/neutron-agent-l3 -# chmod a+rx neutron-agent-l3 - You may now proceed with adding the Pacemaker configuration for -neutron L3 agent resource. Connect to the Pacemaker cluster with crm -configure, and add the following cluster resources: - primitive p_neutron-l3-agent ocf:openstack:neutron-agent-l3 \ - params config="/etc/neutron/neutron.conf" \ - plugin_config="/etc/neutron/l3_agent.ini" \ - op monitor interval="30s" timeout="30s" - This configuration creates: - - - p_neutron-l3-agent, a resource for managing the Neutron L3 Agent service - - - - crm configure supports batch input, so you may copy and paste the -above into your live Pacemaker configuration, and then make changes as -required. - Once completed, commit your configuration changes by entering commit -from the crm configure menu. Pacemaker will then start the neutron L3 agent -service, and its dependent resources, on one of your nodes. - - This method does not ensure zero downtime since it has to recreate all -the namespaces and virtual routers on the node. -
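As an alternative to failing the agent over with Pacemaker, the guide also describes Layer 3 High Availability with VRRP, controlled through the l3_ha, min_l3_agents_per_router, and max_l3_agents_per_router options. A minimal neutron.conf sketch follows; the values are illustrative and respect the documented minimum of two L3 agents per router:

    # /etc/neutron/neutron.conf (fragment)
    [DEFAULT]
    # Create new routers as HA (VRRP) routers.
    l3_ha = True
    # Schedule each HA router onto at least two and at most three L3 agents.
    min_l3_agents_per_router = 2
    max_l3_agents_per_router = 3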
-
diff --git a/doc/high-availability-guide/network/section_highly_available_neutron_metadata_agent.xml b/doc/high-availability-guide/network/section_highly_available_neutron_metadata_agent.xml deleted file mode 100644 index 8270d12d..00000000 --- a/doc/high-availability-guide/network/section_highly_available_neutron_metadata_agent.xml +++ /dev/null @@ -1,41 +0,0 @@ -
Highly available neutron metadata agent

The neutron metadata agent allows Compute API metadata to be reachable by VMs
on tenant networks. High availability for the metadata agent is achieved by
adopting Pacemaker.

Refer to the OpenStack documentation for installing the neutron metadata agent.
Add neutron metadata agent resource to Pacemaker

First of all, you need to download the resource agent to your system:

# cd /usr/lib/ocf/resource.d/openstack
# wget https://raw.github.com/madkiss/openstack-resource-agents/master/ocf/neutron-metadata-agent
# chmod a+rx neutron-metadata-agent

You may now proceed with adding the Pacemaker configuration for the
neutron metadata agent resource. Connect to the Pacemaker cluster with crm
configure, and add the following cluster resource:

primitive p_neutron-metadata-agent ocf:openstack:neutron-metadata-agent \
    params config="/etc/neutron/neutron.conf" \
    agent_config="/etc/neutron/metadata_agent.ini" \
    op monitor interval="30s" timeout="30s"

This configuration creates p_neutron-metadata-agent, a resource for managing
the neutron metadata agent service.

crm configure supports batch input, so you may copy and paste the above into
your live Pacemaker configuration, and then make changes as required.

Once completed, commit your configuration changes by entering commit from the
crm configure menu. Pacemaker then starts the neutron metadata agent service,
and its dependent resources, on one of your nodes.
-
diff --git a/doc/high-availability-guide/network/section_manage_network_resources.xml b/doc/high-availability-guide/network/section_manage_network_resources.xml deleted file mode 100644 index 416d4886..00000000 --- a/doc/high-availability-guide/network/section_manage_network_resources.xml +++ /dev/null @@ -1,15 +0,0 @@ -
Manage network resources

You can now add the Pacemaker configuration for managing all network resources
together as a group. Connect to the Pacemaker cluster with crm configure, and
add the following cluster resource:

group g_services_network p_neutron-l3-agent p_neutron-dhcp-agent \
    p_neutron-metadata-agent
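Once the group is committed, a quick way to see where Pacemaker placed it is
crm_resource; the node name and exact output shown here are placeholders:

# crm_resource --resource g_services_network --locate
resource g_services_network is running on: node1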
diff --git a/doc/high-availability-guide/openstack.ent b/doc/high-availability-guide/openstack.ent deleted file mode 100644 index 3c8d3ca4..00000000 --- a/doc/high-availability-guide/openstack.ent +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - -COPY'> -GET'> -HEAD'> -PUT'> -POST'> -DELETE'> - - - - - -'> diff --git a/doc/high-availability-guide/pacemaker/section_install_packages.xml b/doc/high-availability-guide/pacemaker/section_install_packages.xml deleted file mode 100644 index 606a22e8..00000000 --- a/doc/high-availability-guide/pacemaker/section_install_packages.xml +++ /dev/null @@ -1,44 +0,0 @@ - -
- Install packages - On any host that is meant to be part of a Pacemaker cluster, you must - first establish cluster communications through the Corosync messaging - layer. This involves installing the following packages (and their - dependencies, which your package manager will normally install - automatically): - - - pacemaker (Note that the crm shell should be - downloaded separately.) - - - - crmsh - - - - - corosync - - - - - cluster-glue - - - - fence-agents (Fedora only; all other - distributions use fencing agents from - cluster-glue) - - - - resource-agents - - - -
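As an example, on Ubuntu or Debian these can usually be installed in a single
step; package names may vary slightly by distribution and release, and on
Fedora you would add fence-agents as noted above:

# apt-get install pacemaker crmsh corosync cluster-glue resource-agents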
diff --git a/doc/high-availability-guide/pacemaker/section_set_basic_cluster_properties.xml b/doc/high-availability-guide/pacemaker/section_set_basic_cluster_properties.xml deleted file mode 100644 index 2bef42b2..00000000 --- a/doc/high-availability-guide/pacemaker/section_set_basic_cluster_properties.xml +++ /dev/null @@ -1,54 +0,0 @@ - -
Set basic cluster properties

Once your Pacemaker cluster is set up, it is recommended to set a few basic
cluster properties. To do so, start the crm shell and change into the
configuration menu by entering configure. Alternatively, you may jump straight
into the Pacemaker configuration menu by typing crm configure directly from a
shell prompt.

Then, set the following properties:

property no-quorum-policy="ignore" \
    pe-warn-series-max="1000" \
    pe-input-series-max="1000" \
    pe-error-series-max="1000" \
    cluster-recheck-interval="5min"

- Setting no-quorum-policy="ignore" is required in 2-node Pacemaker clusters
  for the following reason: if quorum enforcement is enabled, and one of the
  two nodes fails, then the remaining node cannot establish the majority of
  quorum votes necessary to run services, and thus it is unable to take over
  any resources. In this case, the appropriate workaround is to ignore loss of
  quorum in the cluster. This should only be done in 2-node clusters: do not
  set this property in Pacemaker clusters with more than two nodes. Note that
  a two-node cluster with this setting exposes a risk of split-brain because
  either half of the cluster, or both, are able to become active in the event
  that both nodes remain online but lose communication with one another. The
  preferred configuration is 3 or more nodes per cluster.

- Setting pe-warn-series-max, pe-input-series-max, and pe-error-series-max to
  1000 instructs Pacemaker to keep a longer history of the inputs processed,
  and errors and warnings generated, by its Policy Engine. This history is
  typically useful in case cluster troubleshooting becomes necessary.

- Pacemaker uses an event-driven approach to cluster state processing.
  However, certain Pacemaker actions occur at a configurable interval,
  cluster-recheck-interval, which defaults to 15 minutes. It is usually
  prudent to reduce this to a shorter interval, such as 5 or 3 minutes.

Once you have made these changes, you may commit the updated configuration.
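To double-check the result, you can dump the live configuration from a shell
prompt; the output below is abbreviated and illustrative:

# crm configure show
...
property no-quorum-policy="ignore" \
    pe-warn-series-max="1000" \
    pe-input-series-max="1000" \
    pe-error-series-max="1000" \
    cluster-recheck-interval="5min"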
diff --git a/doc/high-availability-guide/pacemaker/section_set_up_corosync.xml b/doc/high-availability-guide/pacemaker/section_set_up_corosync.xml deleted file mode 100644 index 3bb3a89d..00000000 --- a/doc/high-availability-guide/pacemaker/section_set_up_corosync.xml +++ /dev/null @@ -1,397 +0,0 @@ - -
- Set up Corosync - Besides installing the Corosync package, you must - also create a configuration file, stored in - /etc/corosync/corosync.conf. Corosync can be configured - to work with either multicast or unicast IP addresses. - -
Set up Corosync with multicast

Most distributions ship an example configuration file (corosync.conf.example)
as part of the documentation bundled with the Corosync package. An example
Corosync configuration file is shown below:

Corosync configuration file (corosync.conf)

totem {
    version: 2

    # Time (in ms) to wait for a token
    token: 10000

    # How many token retransmits before forming a new
    # configuration
    token_retransmits_before_loss_const: 10

    # Turn off the virtual synchrony filter
    vsftype: none

    # Enable encryption
    secauth: on

    # How many threads to use for encryption/decryption
    threads: 0

    # This specifies the redundant ring protocol, which may be
    # none, active, or passive.
    rrp_mode: active

    # The following is a two-ring multicast configuration.
    interface {
        ringnumber: 0
        bindnetaddr: 192.168.42.0
        mcastaddr: 239.255.42.1
        mcastport: 5405
    }
    interface {
        ringnumber: 1
        bindnetaddr: 10.0.42.0
        mcastaddr: 239.255.42.2
        mcastport: 5405
    }
}

amf {
    mode: disabled
}

service {
    # Load the Pacemaker Cluster Resource Manager
    ver: 1
    name: pacemaker
}

aisexec {
    user: root
    group: root
}

logging {
    fileline: off
    to_stderr: yes
    to_logfile: no
    to_syslog: yes
    syslog_facility: daemon
    debug: off
    timestamp: on
    logger_subsys {
        subsys: AMF
        debug: off
        tags: enter|leave|trace1|trace2|trace3|trace4|trace6
    }
}

- The token value specifies the time, in milliseconds, during which the
  Corosync token is expected to be transmitted around the ring. When this
  timeout expires, the token is declared lost, and after
  token_retransmits_before_loss_const lost tokens the non-responding processor
  (cluster node) is declared dead. In other words, token ×
  token_retransmits_before_loss_const is the maximum time a node is allowed to
  not respond to cluster messages before being considered dead. The default
  for token is 1000 (1 second), with 4 allowed retransmits. These defaults are
  intended to minimize failover times, but can cause frequent "false alarms"
  and unintended failovers in case of short network interruptions. The values
  used here are safer, albeit with slightly extended failover times.

- With secauth enabled, Corosync nodes mutually authenticate using a 128-byte
  shared secret stored in /etc/corosync/authkey, which may be generated with
  the corosync-keygen utility. When using secauth, cluster communications are
  also encrypted.

- In Corosync configurations using redundant networking (with more than one
  interface), you must select a Redundant Ring Protocol (RRP) mode other than
  none. active is the recommended RRP mode.

- There are several things to note about the recommended interface
  configuration:

  - The ringnumber must differ between all configured interfaces, starting
    with 0.
  - The bindnetaddr is the network address of the interfaces to bind to. The
    example uses two network addresses of /24 IPv4 subnets.
  - Multicast groups (mcastaddr) must not be reused across cluster boundaries.
    In other words, no two distinct clusters should ever use the same
    multicast group. Be sure to select multicast addresses compliant with
    RFC 2365, "Administratively Scoped IP Multicast".
  - For firewall configurations, note that Corosync communicates over UDP
    only, and uses mcastport (for receives) and mcastport - 1 (for sends).

- The service declaration for the pacemaker service may be placed in the
  corosync.conf file directly, or in its own separate file,
  /etc/corosync/service.d/pacemaker.
Note: If you are using Corosync version 2 on Ubuntu 14.04, remove or comment
out the lines under the service stanza; this stanza is what enables Pacemaker
to start up under Corosync version 1, and it does not apply to Corosync 2.

Once created, the corosync.conf file (and the authkey file if the secauth
option is enabled) must be synchronized across all cluster nodes.
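One possible way to do this is to generate the key on the first node and copy
both files to the remaining nodes over SSH; node2 and node3 are placeholder
host names:

# corosync-keygen
# scp /etc/corosync/corosync.conf /etc/corosync/authkey node2:/etc/corosync/
# scp /etc/corosync/corosync.conf /etc/corosync/authkey node3:/etc/corosync/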
-
Set up Corosync with unicast

Some environments may not support multicast. For such cases, Corosync should
be configured for unicast. An example fragment of the Corosync configuration
file is shown below:

Corosync configuration file fragment (corosync.conf)

totem {
    #...
    interface {
        ringnumber: 0
        bindnetaddr: 192.168.42.0
        broadcast: yes
        mcastport: 5405
    }
    interface {
        ringnumber: 1
        bindnetaddr: 10.0.42.0
        broadcast: yes
        mcastport: 5405
    }
    transport: udpu
}

nodelist {
    node {
        ring0_addr: 192.168.42.1
        ring1_addr: 10.0.42.1
        nodeid: 1
    }
    node {
        ring0_addr: 192.168.42.2
        ring1_addr: 10.0.42.2
        nodeid: 2
    }
}
#...

- If the broadcast parameter is set to yes, the broadcast address is used for
  communication. If this option is set, mcastaddr should not be set.

- The transport directive controls the transport mechanism used. To avoid the
  use of multicast entirely, specify the udpu unicast transport parameter.
  This requires specifying the list of members in the nodelist directive,
  which allows the cluster membership to be known before deployment. The
  default is udp. The transport type can also be set to udpu or iba.

- Within the nodelist directive, it is possible to specify specific
  information about the nodes in the cluster. The directive can contain only
  the node sub-directive, which specifies every node that should be a member
  of the membership, and where non-default options are needed. Every node must
  have at least the ring0_addr field filled.

  For UDPU, every node that should be a member of the membership must be
  specified.

  Possible options are:

  ring{X}_addr specifies the IP address of one of the nodes, where X is the
  ring number.

  The nodeid configuration option is optional when using IPv4 and required
  when using IPv6. This is a 32-bit value specifying the node identifier
  delivered to the cluster membership service. When using IPv4, this defaults
  to the 32-bit IP address to which the system is bound with the ring
  identifier of 0. The node identifier value of zero is reserved and should
  not be used.
- -
Set up Corosync with votequorum library for a full-size cluster

This section describes a full-size cluster configuration with three or more
members, which is appropriate for production deployments. For a two-node
configuration that can be used for demonstrations and testing, please go to
the next section.

The votequorum library is part of the Corosync project. It provides an
interface to the vote-based quorum service and it must be explicitly enabled
in the Corosync configuration file. The main role of the votequorum library is
to avoid split-brain situations, but it also provides a mechanism to:

- Query the quorum status
- Get a list of nodes known to the quorum service
- Receive notifications of quorum state changes
- Change the number of votes assigned to a node
- Change the number of expected votes for a cluster to be quorate
- Connect an additional quorum device to allow small clusters to remain
  quorate during node outages

The votequorum library was created to replace and eliminate qdisk, the
disk-based quorum daemon for CMAN, from advanced cluster configurations.

Votequorum service configuration within Corosync

quorum {
    provider: corosync_votequorum
    expected_votes: 7
    wait_for_all: 1
    last_man_standing: 1
    last_man_standing_window: 10000
}

- provider: corosync_votequorum enables the votequorum library; this is the
  only required option.
- With expected_votes: 7 the cluster is fully operational with 7 nodes (each
  node has 1 vote), quorum: 4. If a list of nodes is specified in nodelist,
  the expected_votes value is ignored.
- With wait_for_all: 1, when starting up a cluster (all nodes down) the
  cluster quorum is held until all of the nodes become online and join the
  cluster for the first time (new in Corosync 2.0).
- last_man_standing: 1 enables the Last Man Standing (LMS) feature (disabled
  by default: 0). If a cluster is on the quorum edge (expected_votes: 7;
  online nodes: 4) for a time longer than configured in
  last_man_standing_window, the cluster can recalculate quorum and continue
  operating even if the next node will be lost. This logic is repeated until
  the number of online nodes in the cluster reaches 2. In order to allow the
  cluster to step down from 2 members to only 1, the auto_tie_breaker option,
  which is not recommended, needs to be set as well.
- last_man_standing_window is the time, in milliseconds, required to
  recalculate the quorum after one or more hosts have been lost from the
  cluster. To perform a new quorum recalculation, the cluster needs to have
  quorum for at least the last_man_standing_window interval
  (default: 10000 ms).
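With this configuration in place and the cluster running, the quorum state can
be inspected with corosync-quorumtool; the abbreviated figures below sketch
what the seven-node example might report and are illustrative only:

# corosync-quorumtool -s
Quorum information
------------------
Nodes:            7
Expected votes:   7
Total votes:      7
Quorum:           4
Flags:            Quorate WaitForAll LastManStanding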
- -
Set up Corosync with votequorum library for two-host clusters

The two-node cluster is a special case, supported for demonstration and
testing through the two_node feature of the votequorum library; it is not
recommended for production environments.

Multicast votequorum service configuration for a two-host Corosync cluster

quorum {
    provider: corosync_votequorum
    expected_votes: 2
    two_node: 1
}

- provider: corosync_votequorum enables the votequorum provider library.
- two_node: 1 puts the cluster into two-node operational mode (default: 0).
  Setting two_node to 1 enables wait_for_all by default. It is still possible
  to override wait_for_all by explicitly setting it to 0. If more than 2 nodes
  join the cluster, the two_node option is automatically disabled.

  Warning: Disabling wait_for_all in a two-node cluster may be dangerous as it
  can lead to a situation (stonith deathmatch) where each node comes up,
  assumes the other is down, and fences the peer in order to safely start
  clustered services. The peer eventually comes up and repeats the process
  until the underlying fault is rectified.

Unicast (UDP) votequorum service configuration for a two-host Corosync cluster

quorum {
    provider: corosync_votequorum
    two_node: 1
}

nodelist {
    node {
        ring0_addr: 192.168.1.1
    }
    node {
        ring0_addr: 192.168.1.2
    }
}

- provider: corosync_votequorum enables the votequorum provider library.
- two_node: 1 artificially sets the quorum below the mathematical majority so
  that the cluster can work with only two members.
- A unicast Corosync configuration requires the nodelist option to explicitly
  provide the list of cluster members.
-
diff --git a/doc/high-availability-guide/pacemaker/section_start_pacemaker.xml b/doc/high-availability-guide/pacemaker/section_start_pacemaker.xml deleted file mode 100644 index e3f76611..00000000 --- a/doc/high-availability-guide/pacemaker/section_start_pacemaker.xml +++ /dev/null @@ -1,48 +0,0 @@ - -
- Start Pacemaker - Once the Corosync services have been started and you have established - that the cluster is communicating properly, it is safe to start - pacemakerd, the Pacemaker - master control process: - - - - /etc/init.d/pacemaker start (LSB) - - - - - service pacemaker start (LSB, alternate) - - - - - start pacemaker (upstart) - - - - - systemctl start pacemaker (systemd) - - - - Once the Pacemaker services have started, Pacemaker will create a default - empty cluster configuration with no resources. You may observe - Pacemaker's status with the crm_mon utility: - ============ -Last updated: Sun Oct 7 21:07:52 2012 -Last change: Sun Oct 7 20:46:00 2012 via cibadmin on node2 -Stack: openais -Current DC: node2 - partition with quorum -Version: 1.1.6-9971ebba4494012a93c03b40a2c58ec0eb60f50c -2 Nodes configured, 2 expected votes -0 Resources configured. -============ - -Online: [ node2 node1 ] -
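You will normally also want Corosync and Pacemaker to start automatically at
boot. How to enable this depends on the init system in use; for example, on
systemd-based distributions:

# systemctl enable corosync
# systemctl enable pacemaker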
diff --git a/doc/high-availability-guide/pacemaker/section_starting_corosync.xml b/doc/high-availability-guide/pacemaker/section_starting_corosync.xml deleted file mode 100644 index 1aa068dd..00000000 --- a/doc/high-availability-guide/pacemaker/section_starting_corosync.xml +++ /dev/null @@ -1,62 +0,0 @@ - -
Starting Corosync

Corosync is started as a regular system service. Depending on your
distribution, it may ship with an LSB init script, an upstart job, or a
systemd unit file. Either way, the service is usually named corosync:

- /etc/init.d/corosync start (LSB)
- service corosync start (LSB, alternate)
- start corosync (upstart)
- systemctl start corosync (systemd)

You can now check the Corosync connectivity with two tools.

The corosync-cfgtool utility, when invoked with the -s option, gives a summary
of the health of the communication rings:

# corosync-cfgtool -s
Printing ring status.
Local node ID 435324542
RING ID 0
        id      = 192.168.42.82
        status  = ring 0 active with no faults
RING ID 1
        id      = 10.0.42.100
        status  = ring 1 active with no faults

The corosync-objctl utility can be used to dump the Corosync cluster member
list:

# corosync-objctl runtime.totem.pg.mrp.srp.members
runtime.totem.pg.mrp.srp.435324542.ip=r(0) ip(192.168.42.82) r(1) ip(10.0.42.100)
runtime.totem.pg.mrp.srp.435324542.join_count=1
runtime.totem.pg.mrp.srp.435324542.status=joined
runtime.totem.pg.mrp.srp.983895584.ip=r(0) ip(192.168.42.87) r(1) ip(10.0.42.254)
runtime.totem.pg.mrp.srp.983895584.join_count=1
runtime.totem.pg.mrp.srp.983895584.status=joined

You should see a status=joined entry for each of your constituent cluster
nodes.

Note: If you are using Corosync version 2, use the corosync-cmapctl utility as
it is a direct replacement for corosync-objctl.
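With Corosync 2, an equivalent membership dump might look like the following;
the exact key layout and value formatting are indicative only:

# corosync-cmapctl runtime.totem.pg.mrp.srp.members
runtime.totem.pg.mrp.srp.members.435324542.ip (str) = r(0) ip(192.168.42.82) r(1) ip(10.0.42.100)
runtime.totem.pg.mrp.srp.members.435324542.join_count (u32) = 1
runtime.totem.pg.mrp.srp.members.435324542.status (str) = joined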
diff --git a/doc/high-availability-guide/part_active_active.xml b/doc/high-availability-guide/part_active_active.xml deleted file mode 100644 index 72131eaa..00000000 --- a/doc/high-availability-guide/part_active_active.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - HA using active/active - - - - - - diff --git a/doc/high-availability-guide/part_active_passive.xml b/doc/high-availability-guide/part_active_passive.xml deleted file mode 100644 index b3711f70..00000000 --- a/doc/high-availability-guide/part_active_passive.xml +++ /dev/null @@ -1,12 +0,0 @@ - - - HA using active/passive - - - - - diff --git a/doc/high-availability-guide/pom.xml b/doc/high-availability-guide/pom.xml deleted file mode 100644 index 7d6e5178..00000000 --- a/doc/high-availability-guide/pom.xml +++ /dev/null @@ -1,78 +0,0 @@ - - - - org.openstack.docs - parent-pom - 1.0.0-SNAPSHOT - ../pom.xml - - 4.0.0 - openstack-high-availability-guide - jar - OpenStack High Availability Guide - - - - 0 - - - - - - - - com.rackspace.cloud.api - clouddocs-maven-plugin - - - - generate-webhelp - - generate-webhelp - - generate-sources - - - ${comments.enabled} - openstack-ha-guide - 1 - UA-17511903-1 - - appendix toc,title - article/appendix nop - article toc,title - book toc,title,figure,table,example,equation - chapter toc,title - section toc - part toc,title - qandadiv toc - qandaset toc - reference toc,title - set toc,title - - - 0 - 1 - 0 - high-availability-guide - high-availability-guide - - - - - - true - . - - bk-ha-guide.xml - - http://docs.openstack.org/high-availability-guide/content - ${basedir}/../glossary/glossary-terms.xml - openstack - - - - - diff --git a/doc/pom.xml b/doc/pom.xml deleted file mode 100644 index 41b6d8fd..00000000 --- a/doc/pom.xml +++ /dev/null @@ -1,46 +0,0 @@ - - - 4.0.0 - - org.openstack.docs - parent-pom - 1.0.0-SNAPSHOT - pom - - - high-availability-guide - - - - Rackspace Research Repositories - - true - - - - rackspace-research - Rackspace Research Repository - http://maven.research.rackspacecloud.com/content/groups/public/ - - - - - rackspace-research - Rackspace Research Repository - http://maven.research.rackspacecloud.com/content/groups/public/ - - - - - - - - com.rackspace.cloud.api - clouddocs-maven-plugin - 2.1.4 - - - - diff --git a/tools/build-all-rst.sh b/tools/build-all-rst.sh index 814b0bb8..8a38d1fd 100755 --- a/tools/build-all-rst.sh +++ b/tools/build-all-rst.sh @@ -3,4 +3,4 @@ mkdir -p publish-docs tools/build-rst.sh doc/ha-guide --build build \ - --target draft/ha-guide + --target ha-guide diff --git a/tox.ini b/tox.ini index 248d514e..d98d5f06 100644 --- a/tox.ini +++ b/tox.ini @@ -20,27 +20,25 @@ whitelist_externals = commands = {posargs} [testenv:checklinks] -commands = openstack-doc-test --check-links {posargs} +commands = true [testenv:checkniceness] commands = - openstack-doc-test --check-niceness {posargs} doc8 doc [testenv:checksyntax] commands = - openstack-doc-test --check-syntax {posargs} # Check that .po and .pot files are valid: bash -c "find doc -type f -regex '.*\.pot?' 
-print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null" [testenv:checkdeletions] -commands = openstack-doc-test --check-deletions {posargs} +commands = true [testenv:checkbuild] commands = # Build and copy RST Guides {toxinidir}/tools/build-all-rst.sh - # Build DocBook Guide + # This only generates the index page openstack-doc-test --check-build {posargs} [testenv:publishdocs] @@ -50,8 +48,6 @@ commands = commands = # Build and copy RST Guides {toxinidir}/tools/build-all-rst.sh - # Build DocBook Guide - openstack-doc-test --check-build --publish [testenv:checklang] # Generatedocbook needs xml2po which cannot be installed @@ -59,7 +55,10 @@ commands = # use sitepackages. sitepackages=True whitelist_externals = doc-tools-check-languages -commands = doc-tools-check-languages doc-tools-check-languages.conf test all +commands = + doc-tools-check-languages doc-tools-check-languages.conf test all + # Check that .po and .pot files are valid: + bash -c "find doc -type f -regex '.*\.pot?' -print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null" [testenv:buildlang] # Run as "tox -e buildlang -- $LANG"