diff --git a/doc-tools-check-languages.conf b/doc-tools-check-languages.conf index 97c74b5183..7172c0202c 100644 --- a/doc-tools-check-languages.conf +++ b/doc-tools-check-languages.conf @@ -7,10 +7,10 @@ declare -A BOOKS=( ["de"]="image-guide install-guide" ["fr"]="install-guide" ["id"]="image-guide install-guide" - ["ja"]="ha-guide image-guide install-guide" + ["ja"]="image-guide install-guide" ["ko_KR"]="install-guide" ["ru"]="install-guide" - ["tr_TR"]="arch-design ha-guide image-guide install-guide" + ["tr_TR"]="arch-design image-guide install-guide" ["zh_CN"]="install-guide" ) @@ -24,7 +24,6 @@ declare -A SPECIAL_BOOKS=( # This needs special handling, handle it with the RST tools. ["common"]="RST" ["glossary"]="RST" - ["ha-guide"]="RST" ["image-guide"]="RST" ["install-guide"]="RST" # Do not translate diff --git a/doc/arch-design/source/design-storage/design-storage-arch.rst b/doc/arch-design/source/design-storage/design-storage-arch.rst index 649b223d00..64edcafad9 100644 --- a/doc/arch-design/source/design-storage/design-storage-arch.rst +++ b/doc/arch-design/source/design-storage/design-storage-arch.rst @@ -496,7 +496,7 @@ servers and load balancing. HAProxy is one method of providing load balancing and high availability and is often combined with keepalived or pacemaker to ensure the HAProxy service maintains a stable VIP. Sample HAProxy configurations can be found in the `OpenStack HA Guide. -`_. +`_. Replication ----------- diff --git a/doc/doc-contrib-guide/source/doc-bugs.rst b/doc/doc-contrib-guide/source/doc-bugs.rst index 1f94458f11..f7c86dd025 100644 --- a/doc/doc-contrib-guide/source/doc-bugs.rst +++ b/doc/doc-contrib-guide/source/doc-bugs.rst @@ -249,8 +249,7 @@ Depending on the area a bug affects, it has one or more tags. For example: * **low-hanging-fruit** for documentation bugs that are straightforward to fix. If you are a newcomer, this is a way to start. -* **ha-guide**, **install-guide**, **image-guide**, and other for specific - guides. +* **install-guide**, **image-guide**, and other for specific guides. * **infra**, **theme** for documentation bugs that are in the documentation build tool chain. 
diff --git a/doc/doc-contrib-guide/source/topic-tags.rst b/doc/doc-contrib-guide/source/topic-tags.rst index 31cea25cfb..4fc417a0b6 100644 --- a/doc/doc-contrib-guide/source/topic-tags.rst +++ b/doc/doc-contrib-guide/source/topic-tags.rst @@ -18,9 +18,6 @@ to, use the following tags: [doc-contrib] OpenStack Documentation Contributor Guide -[ha-guide] - OpenStack High Availability Guide - [image-guide] OpenStack Virtual Machine Image Guide diff --git a/doc/ha-guide/setup.cfg b/doc/ha-guide/setup.cfg deleted file mode 100644 index d8b4586410..0000000000 --- a/doc/ha-guide/setup.cfg +++ /dev/null @@ -1,27 +0,0 @@ -[metadata] -name = openstackhaguide -summary = OpenStack High Availability Guide -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = https://docs.openstack.org/ -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Topic :: Documentation - -[global] -setup-hooks = - pbr.hooks.setup_hook - -[files] - -[build_sphinx] -warning-is-error = 1 -build-dir = build -source-dir = source - -[wheel] -universal = 1 diff --git a/doc/ha-guide/setup.py b/doc/ha-guide/setup.py deleted file mode 100644 index 736375744d..0000000000 --- a/doc/ha-guide/setup.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) diff --git a/doc/ha-guide/source/appendix.rst b/doc/ha-guide/source/appendix.rst deleted file mode 100644 index ca6b1a7182..0000000000 --- a/doc/ha-guide/source/appendix.rst +++ /dev/null @@ -1,8 +0,0 @@ -Appendix -~~~~~~~~ - -.. toctree:: - :maxdepth: 1 - - common/app-support.rst - common/glossary.rst diff --git a/doc/ha-guide/source/common b/doc/ha-guide/source/common deleted file mode 120000 index dc879abe93..0000000000 --- a/doc/ha-guide/source/common +++ /dev/null @@ -1 +0,0 @@ -../../common \ No newline at end of file diff --git a/doc/ha-guide/source/compute-node-ha.rst b/doc/ha-guide/source/compute-node-ha.rst deleted file mode 100644 index 940ea507f9..0000000000 --- a/doc/ha-guide/source/compute-node-ha.rst +++ /dev/null @@ -1,52 +0,0 @@ -============================ -Configuring the compute node -============================ - -The `Installation Guides -`_ -provide instructions for installing multiple compute nodes. -To make the compute nodes highly available, you must configure the -environment to include multiple instances of the API and other services. 
- -Configuring high availability for instances -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -As of September 2016, the OpenStack High Availability community is -designing and developing an official and unified way to provide high -availability for instances. We are developing automatic -recovery from failures of hardware or hypervisor-related software on -the compute node, or other failures that could prevent instances from -functioning correctly, such as, issues with a cinder volume I/O path. - -More details are available in the `user story -`_ -co-authored by OpenStack's HA community and `Product Working Group -`_ (PWG), where this feature is -identified as missing functionality in OpenStack, which -should be addressed with high priority. - -Existing solutions -~~~~~~~~~~~~~~~~~~ - -The architectural challenges of instance HA and several currently -existing solutions were presented in `a talk at the Austin summit -`_, -for which `slides are also available `_. - -The code for three of these solutions can be found online at the following -links: - -* `a mistral-based auto-recovery workflow - `_, by Intel -* `masakari `_, by NTT -* `OCF RAs - `_, - as used by Red Hat and SUSE - -Current upstream work -~~~~~~~~~~~~~~~~~~~~~ - -Work is in progress on a unified approach, which combines the best -aspects of existing upstream solutions. More details are available on -`the HA VMs user story wiki -`_. diff --git a/doc/ha-guide/source/conf.py b/doc/ha-guide/source/conf.py deleted file mode 100644 index cf14559e54..0000000000 --- a/doc/ha-guide/source/conf.py +++ /dev/null @@ -1,299 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os -# import sys - -import openstackdocstheme - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = ['openstackdocstheme'] - -# Add any paths that contain templates here, relative to this directory. -# templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. 
-repository_name = "openstack/openstack-manuals" -bug_project = 'openstack-manuals' -project = u'High Availability Guide' -bug_tag = u'ha-guide' -copyright = u'2015-2018, OpenStack contributors' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '' -# The full version, including alpha/beta/rc tags. -release = '' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['common/cli*', 'common/nova*', - 'common/appendix.rst', - 'common/get-started*', 'common/dashboard*'] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - 'display_badge': False -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [openstackdocstheme.get_html_theme_path()] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = [] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. 
-# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# So that we can enable "log-a-bug" links from each output HTML page, this -# variable must be set to a format that includes year, month, day, hours and -# minutes. -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -html_use_index = False - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -html_show_sourcelink = False - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'ha-guide' - -# If true, publish source files -html_copy_source = False - -# -- Options for LaTeX output --------------------------------------------- -pdf_theme_path = openstackdocstheme.get_pdf_theme_path() -openstack_logo = openstackdocstheme.get_openstack_logo_path() - -latex_custom_template = r""" -\newcommand{\openstacklogo}{%s} -\usepackage{%s} -""" % (openstack_logo, pdf_theme_path) - -latex_engine = 'xelatex' - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - 'papersize': 'a4paper', - - # The font size ('10pt', '11pt' or '12pt'). - 'pointsize': '11pt', - - #Default figure align - 'figure_align': 'H', - - # Not to generate blank page after chapter - 'classoptions': ',openany', - - # Additional stuff for the LaTeX preamble. - 'preamble': latex_custom_template, -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'HAGuide.tex', u'High Availability Guide', - u'OpenStack contributors', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). 
-man_pages = [ - ('index', 'haguide', u'High Availability Guide', - [u'OpenStack contributors'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'HAGuide', u'High Availability Guide', - u'OpenStack contributors', 'HAGuide', - 'This guide shows OpenStack operators and deployers how to configure' - 'OpenStack Networking to be robust and fault-tolerant.', 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/doc/ha-guide/source/controller-ha-haproxy.rst b/doc/ha-guide/source/controller-ha-haproxy.rst deleted file mode 100644 index 1dbf09e515..0000000000 --- a/doc/ha-guide/source/controller-ha-haproxy.rst +++ /dev/null @@ -1,233 +0,0 @@ -======= -HAProxy -======= - -HAProxy provides a fast and reliable HTTP reverse proxy and load balancer -for TCP or HTTP applications. It is particularly suited for web crawling -under very high loads while needing persistence or Layer 7 processing. -It realistically supports tens of thousands of connections with recent -hardware. - -Each instance of HAProxy configures its front end to accept connections only -to the virtual IP (VIP) address. The HAProxy back end (termination -point) is a list of all the IP addresses of instances for load balancing. - -.. note:: - - Ensure your HAProxy installation is not a single point of failure, - it is advisable to have multiple HAProxy instances running. - - You can also ensure the availability by other means, using Keepalived - or Pacemaker. - -Alternatively, you can use a commercial load balancer, which is hardware -or software. We recommend a hardware load balancer as it generally has -good performance. - -For detailed instructions about installing HAProxy on your nodes, -see the HAProxy `official documentation `_. - -Configuring HAProxy -~~~~~~~~~~~~~~~~~~~ - -#. Restart the HAProxy service. - -#. Locate your HAProxy instance on each OpenStack controller node in your - environment. The following is an example ``/etc/haproxy/haproxy.cfg`` - configuration file. Configure your instance using the following - configuration file, you will need a copy of it on each - controller node. - - - .. 
code-block:: none - - global - chroot /var/lib/haproxy - daemon - group haproxy - maxconn 4000 - pidfile /var/run/haproxy.pid - user haproxy - - defaults - log global - maxconn 4000 - option redispatch - retries 3 - timeout http-request 10s - timeout queue 1m - timeout connect 10s - timeout client 1m - timeout server 1m - timeout check 10s - - listen dashboard_cluster - bind :443 - balance source - option tcpka - option httpchk - option tcplog - server controller1 10.0.0.12:443 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.13:443 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.14:443 check inter 2000 rise 2 fall 5 - - listen galera_cluster - bind :3306 - balance source - option mysql-check - server controller1 10.0.0.12:3306 check port 9200 inter 2000 rise 2 fall 5 - server controller2 10.0.0.13:3306 backup check port 9200 inter 2000 rise 2 fall 5 - server controller3 10.0.0.14:3306 backup check port 9200 inter 2000 rise 2 fall 5 - - listen glance_api_cluster - bind :9292 - balance source - option tcpka - option httpchk - option tcplog - server controller1 10.0.0.12:9292 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.13:9292 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.14:9292 check inter 2000 rise 2 fall 5 - - listen glance_registry_cluster - bind :9191 - balance source - option tcpka - option tcplog - server controller1 10.0.0.12:9191 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.13:9191 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.14:9191 check inter 2000 rise 2 fall 5 - - listen keystone_admin_public_internal_cluster - bind :5000 - balance source - option tcpka - option httpchk - option tcplog - server controller1 10.0.0.12:5000 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.13:5000 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.14:5000 check inter 2000 rise 2 fall 5 - - listen nova_ec2_api_cluster - bind :8773 - balance source - option tcpka - option tcplog - server controller1 10.0.0.12:8773 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.13:8773 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.14:8773 check inter 2000 rise 2 fall 5 - - listen nova_compute_api_cluster - bind :8774 - balance source - option tcpka - option httpchk - option tcplog - server controller1 10.0.0.12:8774 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.13:8774 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.14:8774 check inter 2000 rise 2 fall 5 - - listen nova_metadata_api_cluster - bind :8775 - balance source - option tcpka - option tcplog - server controller1 10.0.0.12:8775 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.13:8775 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.14:8775 check inter 2000 rise 2 fall 5 - - listen cinder_api_cluster - bind :8776 - balance source - option tcpka - option httpchk - option tcplog - server controller1 10.0.0.12:8776 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.13:8776 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.14:8776 check inter 2000 rise 2 fall 5 - - listen ceilometer_api_cluster - bind :8777 - balance source - option tcpka - option tcplog - server controller1 10.0.0.12:8777 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.13:8777 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.14:8777 check inter 2000 rise 2 fall 5 - - listen nova_vncproxy_cluster - bind :6080 - balance source - option tcpka - option tcplog - server controller1 10.0.0.12:6080 check 
inter 2000 rise 2 fall 5 - server controller2 10.0.0.13:6080 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.14:6080 check inter 2000 rise 2 fall 5 - - listen neutron_api_cluster - bind :9696 - balance source - option tcpka - option httpchk - option tcplog - server controller1 10.0.0.12:9696 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.13:9696 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.14:9696 check inter 2000 rise 2 fall 5 - - listen swift_proxy_cluster - bind :8080 - balance source - option tcplog - option tcpka - server controller1 10.0.0.12:8080 check inter 2000 rise 2 fall 5 - server controller2 10.0.0.13:8080 check inter 2000 rise 2 fall 5 - server controller3 10.0.0.14:8080 check inter 2000 rise 2 fall 5 - - .. note:: - - The Galera cluster configuration directive ``backup`` indicates - that two of the three controllers are standby nodes. - This ensures that only one node services write requests - because OpenStack support for multi-node writes is not yet production-ready. - - .. note:: - - The Telemetry API service configuration does not have the ``option httpchk`` - directive as it cannot process this check properly. - -.. TODO: explain why the Telemetry API is so special - -#. Configure the kernel parameter to allow non-local IP binding. This allows - running HAProxy instances to bind to a VIP for failover. Add following line - to ``/etc/sysctl.conf``: - - .. code-block:: none - - net.ipv4.ip_nonlocal_bind = 1 - -#. Restart the host or, to make changes work immediately, invoke: - - .. code-block:: console - - $ sysctl -p - -#. Add HAProxy to the cluster and ensure the VIPs can only run on machines - where HAProxy is active: - - ``pcs`` - - .. code-block:: console - - $ pcs resource create lb-haproxy systemd:haproxy --clone - $ pcs constraint order start vip then lb-haproxy-clone kind=Optional - $ pcs constraint colocation add lb-haproxy-clone with vip - - ``crmsh`` - - .. code-block:: console - - $ crm cib new conf-haproxy - $ crm configure primitive haproxy lsb:haproxy op monitor interval="1s" - $ crm configure clone haproxy-clone haproxy - $ crm configure colocation vip-with-haproxy inf: vip haproxy-clone - $ crm configure order haproxy-after-vip mandatory: vip haproxy-clone diff --git a/doc/ha-guide/source/controller-ha-identity.rst b/doc/ha-guide/source/controller-ha-identity.rst deleted file mode 100644 index 3eaea9ae72..0000000000 --- a/doc/ha-guide/source/controller-ha-identity.rst +++ /dev/null @@ -1,167 +0,0 @@ -============================= -Highly available Identity API -============================= - -Making the OpenStack Identity service highly available -in active and passive mode involves: - -- :ref:`identity-pacemaker` -- :ref:`identity-config-identity` -- :ref:`identity-services-config` - -.. _identity-pacemaker: - -Prerequisites -~~~~~~~~~~~~~ - -Before beginning, ensure you have read the -`OpenStack Identity service getting started documentation -`_. - -Add OpenStack Identity resource to Pacemaker -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following section(s) detail how to add the OpenStack Identity -resource to Pacemaker on SUSE and Red Hat. - -SUSE ------ - -SUSE Enterprise Linux and SUSE-based distributions, such as openSUSE, -use a set of OCF agents for controlling OpenStack services. - -#. Run the following commands to download the OpenStack Identity resource - to Pacemaker: - - .. 
code-block:: console - - # cd /usr/lib/ocf/resource.d - # mkdir openstack - # cd openstack - # wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/keystone - # chmod a+rx * - -#. Add the Pacemaker configuration for the OpenStack Identity resource - by running the following command to connect to the Pacemaker cluster: - - .. code-block:: console - - # crm configure - -#. Add the following cluster resources: - - .. code-block:: console - - clone p_keystone ocf:openstack:keystone \ - params config="/etc/keystone/keystone.conf" os_password="secretsecret" os_username="admin" os_tenant_name="admin" os_auth_url="http://10.0.0.11:5000/v2.0/" \ - op monitor interval="30s" timeout="30s" - - .. note:: - - This configuration creates ``p_keystone``, - a resource for managing the OpenStack Identity service. - -#. Commit your configuration changes from the :command:`crm configure` menu - with the following command: - - .. code-block:: console - - # commit - -The :command:`crm configure` supports batch input. You may have to copy and -paste the above lines into your live Pacemaker configuration, and then make -changes as required. - -For example, you may enter ``edit p_ip_keystone`` from the -:command:`crm configure` menu and edit the resource to match your preferred -virtual IP address. - -Pacemaker now starts the OpenStack Identity service and its dependent -resources on all of your nodes. - -Red Hat --------- - -For Red Hat Enterprise Linux and Red Hat-based Linux distributions, -the following process uses Systemd unit files. - -.. code-block:: console - - # pcs resource create openstack-keystone systemd:openstack-keystone --clone interleave=true - -.. _identity-config-identity: - -Configure OpenStack Identity service -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Edit the :file:`keystone.conf` file - to change the values of the :manpage:`bind(2)` parameters: - - .. code-block:: ini - - bind_host = 10.0.0.12 - public_bind_host = 10.0.0.12 - admin_bind_host = 10.0.0.12 - - The ``admin_bind_host`` parameter - lets you use a private network for admin access. - -#. To be sure that all data is highly available, - ensure that everything is stored in the MySQL database - (which is also highly available): - - .. code-block:: ini - - [catalog] - driver = keystone.catalog.backends.sql.Catalog - # ... - [identity] - driver = keystone.identity.backends.sql.Identity - # ... - -#. If the Identity service will be sending ceilometer notifications - and your message bus is configured for high availability, you will - need to ensure that the Identity service is correctly configured to - use it. For details on how to configure the Identity service for - this kind of deployment, see :doc:`shared-messaging`. - -.. _identity-services-config: - -Configure OpenStack services to use the highly available OpenStack Identity -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Your OpenStack services now point their OpenStack Identity configuration -to the highly available virtual cluster IP address. - -#. For OpenStack Compute, (if your OpenStack Identity service IP address - is 10.0.0.11) use the following configuration in the :file:`api-paste.ini` - file: - - .. code-block:: ini - - auth_host = 10.0.0.11 - -#. Create the OpenStack Identity Endpoint with this IP address. - - .. note:: - - If you are using both private and public IP addresses, - create two virtual IP addresses and define the endpoint. For - example: - - .. 
code-block:: console - - $ openstack endpoint create --region $KEYSTONE_REGION \ - $service-type public http://PUBLIC_VIP:5000/v2.0 - $ openstack endpoint create --region $KEYSTONE_REGION \ - $service-type admin http://10.0.0.11:35357/v2.0 - $ openstack endpoint create --region $KEYSTONE_REGION \ - $service-type internal http://10.0.0.11:5000/v2.0 - - -#. If you are using the horizon Dashboard, edit the :file:`local_settings.py` - file to include the following: - - .. code-block:: ini - - OPENSTACK_HOST = 10.0.0.11 diff --git a/doc/ha-guide/source/controller-ha-memcached.rst b/doc/ha-guide/source/controller-ha-memcached.rst deleted file mode 100644 index b5cebcc223..0000000000 --- a/doc/ha-guide/source/controller-ha-memcached.rst +++ /dev/null @@ -1,21 +0,0 @@ -========= -Memcached -========= - -Memcached is a general-purpose distributed memory caching system. It -is used to speed up dynamic database-driven websites by caching data -and objects in RAM to reduce the number of times an external data -source must be read. - -Memcached is a memory cache demon that can be used by most OpenStack -services to store ephemeral data, such as tokens. - -Access to Memcached is not handled by HAProxy because replicated -access is currently in an experimental state. Instead, OpenStack -services must be supplied with the full list of hosts running -Memcached. - -The Memcached client implements hashing to balance objects among the -instances. Failure of an instance impacts only a percentage of the -objects and the client automatically removes it from the list of -instances. The SLA is several minutes. diff --git a/doc/ha-guide/source/controller-ha-pacemaker.rst b/doc/ha-guide/source/controller-ha-pacemaker.rst deleted file mode 100644 index 8ea63fd008..0000000000 --- a/doc/ha-guide/source/controller-ha-pacemaker.rst +++ /dev/null @@ -1,631 +0,0 @@ -======================= -Pacemaker cluster stack -======================= - -`Pacemaker `_ cluster stack is a state-of-the-art -high availability and load balancing stack for the Linux platform. -Pacemaker is used to make OpenStack infrastructure highly available. - -.. note:: - - It is storage and application-agnostic, and in no way specific to OpenStack. - -Pacemaker relies on the -`Corosync `_ messaging layer -for reliable cluster communications. Corosync implements the Totem single-ring -ordering and membership protocol. It also provides UDP and InfiniBand based -messaging, quorum, and cluster membership to Pacemaker. - -Pacemaker does not inherently understand the applications it manages. -Instead, it relies on resource agents (RAs) that are scripts that encapsulate -the knowledge of how to start, stop, and check the health of each application -managed by the cluster. - -These agents must conform to one of the `OCF `_, -`SysV Init `_, Upstart, or Systemd standards. - -Pacemaker ships with a large set of OCF agents (such as those managing -MySQL databases, virtual IP addresses, and RabbitMQ), but can also use -any agents already installed on your system and can be extended with -your own (see the -`developer guide `_). - -The steps to implement the Pacemaker cluster stack are: - -- :ref:`pacemaker-install` -- :ref:`pacemaker-corosync-setup` -- :ref:`pacemaker-corosync-start` -- :ref:`pacemaker-start` -- :ref:`pacemaker-cluster-properties` - -.. _pacemaker-install: - -Install packages -~~~~~~~~~~~~~~~~ - -On any host that is meant to be part of a Pacemaker cluster, establish cluster -communications through the Corosync messaging layer. 
-This involves installing the following packages (and their dependencies, which -your package manager usually installs automatically): - -- `pacemaker` - -- `pcs` (CentOS or RHEL) or crmsh - -- `corosync` - -- `fence-agents` (CentOS or RHEL) or cluster-glue - -- `resource-agents` - -- `libqb0` - -.. _pacemaker-corosync-setup: - -Set up the cluster with pcs -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Make sure `pcs` is running and configured to start at boot time: - - .. code-block:: console - - $ systemctl enable pcsd - $ systemctl start pcsd - -#. Set a password for hacluster user on each host: - - .. code-block:: console - - $ echo my-secret-password-no-dont-use-this-one \ - | passwd --stdin hacluster - - .. note:: - - Since the cluster is a single administrative domain, it is - acceptable to use the same password on all nodes. - -#. Use that password to authenticate to the nodes that will - make up the cluster: - - .. code-block:: console - - $ pcs cluster auth controller1 controller2 controller3 \ - -u hacluster -p my-secret-password-no-dont-use-this-one --force - - .. note:: - - The ``-p`` option is used to give the password on command - line and makes it easier to script. - -#. Create and name the cluster. Then, start it and enable all components to - auto-start at boot time: - - .. code-block:: console - - $ pcs cluster setup --force --name my-first-openstack-cluster \ - controller1 controller2 controller3 - $ pcs cluster start --all - $ pcs cluster enable --all - -.. note :: - - In Red Hat Enterprise Linux or CentOS environments, this is a recommended - path to perform configuration. For more information, see the `RHEL docs - `_. - -Set up the cluster with `crmsh` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -After installing the Corosync package, you must create -the :file:`/etc/corosync/corosync.conf` configuration file. - -.. note:: - - For Ubuntu, you should also enable the Corosync service in the - ``/etc/default/corosync`` configuration file. - -Corosync can be configured to work with either multicast or unicast IP -addresses or to use the votequorum library. - -- :ref:`corosync-multicast` -- :ref:`corosync-unicast` -- :ref:`corosync-votequorum` - -.. _corosync-multicast: - -Set up Corosync with multicast ------------------------------- - -Most distributions ship an example configuration file -(:file:`corosync.conf.example`) as part of the documentation bundled with -the Corosync package. An example Corosync configuration file is shown below: - -**Example Corosync configuration file for multicast (``corosync.conf``)** - -.. code-block:: none - - totem { - version: 2 - - # Time (in ms) to wait for a token (1) - token: 10000 - - # How many token retransmits before forming a new - # configuration - token_retransmits_before_loss_const: 10 - - # Turn off the virtual synchrony filter - vsftype: none - - # Enable encryption (2) - secauth: on - - # How many threads to use for encryption/decryption - threads: 0 - - # This specifies the redundant ring protocol, which may be - # none, active, or passive. (3) - rrp_mode: active - - # The following is a two-ring multicast configuration. 
(4) - interface { - ringnumber: 0 - bindnetaddr: 10.0.0.0 - mcastaddr: 239.255.42.1 - mcastport: 5405 - } - interface { - ringnumber: 1 - bindnetaddr: 10.0.42.0 - mcastaddr: 239.255.42.2 - mcastport: 5405 - } - } - - amf { - mode: disabled - } - - service { - # Load the Pacemaker Cluster Resource Manager (5) - ver: 1 - name: pacemaker - } - - aisexec { - user: root - group: root - } - - logging { - fileline: off - to_stderr: yes - to_logfile: no - to_syslog: yes - syslog_facility: daemon - debug: off - timestamp: on - logger_subsys { - subsys: AMF - debug: off - tags: enter|leave|trace1|trace2|trace3|trace4|trace6 - }} - -Note the following: - -- The ``token`` value specifies the time, in milliseconds, - during which the Corosync token is expected - to be transmitted around the ring. - When this timeout expires, the token is declared lost, - and after ``token_retransmits_before_loss_const lost`` tokens, - the non-responding processor (cluster node) is declared dead. - ``token × token_retransmits_before_loss_const`` - is the maximum time a node is allowed to not respond to cluster messages - before being considered dead. - The default for token is 1000 milliseconds (1 second), - with 4 allowed retransmits. - These defaults are intended to minimize failover times, - but can cause frequent false alarms and unintended failovers - in case of short network interruptions. The values used here are safer, - albeit with slightly extended failover times. - -- With ``secauth`` enabled, - Corosync nodes mutually authenticates using a 128-byte shared secret - stored in the :file:`/etc/corosync/authkey` file. - This can be generated with the :command:`corosync-keygen` utility. - Cluster communications are encrypted when using ``secauth``. - -- In Corosync, configurations use redundant networking - (with more than one interface). This means you must select a Redundant - Ring Protocol (RRP) mode other than none. We recommend ``active`` as - the RRP mode. - - Note the following about the recommended interface configuration: - - - Each configured interface must have a unique ``ringnumber``, - starting with 0. - - - The ``bindnetaddr`` is the network address of the interfaces to bind to. - The example uses two network addresses of /24 IPv4 subnets. - - - Multicast groups (``mcastaddr``) must not be reused - across cluster boundaries. No two distinct clusters - should ever use the same multicast group. - Be sure to select multicast addresses compliant with - `RFC 2365, "Administratively Scoped IP Multicast" - `_. - - - For firewall configurations, Corosync communicates over UDP only, - and uses ``mcastport`` (for receives) and ``mcastport - 1`` (for sends). - -- The service declaration for the Pacemaker service - may be placed in the :file:`corosync.conf` file directly - or in its own separate file, :file:`/etc/corosync/service.d/pacemaker`. - - .. note:: - - If you are using Corosync version 2 on Ubuntu 14.04, - remove or comment out lines under the service stanza. - These stanzas enable Pacemaker to start up. Another potential - problem is the boot and shutdown order of Corosync and - Pacemaker. To force Pacemaker to start after Corosync and - stop before Corosync, fix the start and kill symlinks manually: - - .. code-block:: console - - # update-rc.d pacemaker start 20 2 3 4 5 . stop 00 0 1 6 . - - The Pacemaker service also requires an additional - configuration file ``/etc/corosync/uidgid.d/pacemaker`` - to be created with the following content: - - .. 
code-block:: none - - uidgid { - uid: hacluster - gid: haclient - } - -- Once created, synchronize the :file:`corosync.conf` file - (and the :file:`authkey` file if the secauth option is enabled) - across all cluster nodes. - -.. _corosync-unicast: - -Set up Corosync with unicast ----------------------------- - -For environments that do not support multicast, Corosync should be configured -for unicast. An example fragment of the :file:`corosync.conf` file -for unicastis is shown below: - -**Corosync configuration file fragment for unicast (``corosync.conf``)** - -.. code-block:: none - - totem { - #... - interface { - ringnumber: 0 - bindnetaddr: 10.0.0.0 - broadcast: yes (1) - mcastport: 5405 - } - interface { - ringnumber: 1 - bindnetaddr: 10.0.42.0 - broadcast: yes - mcastport: 5405 - } - transport: udpu (2) - } - - nodelist { (3) - node { - ring0_addr: 10.0.0.12 - ring1_addr: 10.0.42.12 - nodeid: 1 - } - node { - ring0_addr: 10.0.0.13 - ring1_addr: 10.0.42.13 - nodeid: 2 - } - node { - ring0_addr: 10.0.0.14 - ring1_addr: 10.0.42.14 - nodeid: 3 - } - } - #... - -Note the following: - -- If the ``broadcast`` parameter is set to ``yes``, the broadcast address is - used for communication. If this option is set, the ``mcastaddr`` parameter - should not be set. - -- The ``transport`` directive controls the transport mechanism. - To avoid the use of multicast entirely, specify the ``udpu`` unicast - transport parameter. This requires specifying the list of members in the - ``nodelist`` directive. This potentially makes up the membership before - deployment. The default is ``udp``. The transport type can also be set to - ``udpu`` or ``iba``. - -- Within the ``nodelist`` directive, it is possible to specify specific - information about the nodes in the cluster. The directive can contain only - the node sub-directive, which specifies every node that should be a member - of the membership, and where non-default options are needed. Every node must - have at least the ``ring0_addr`` field filled. - - .. note:: - - For UDPU, every node that should be a member of the membership must be specified. - - Possible options are: - - - ``ring{X}_addr`` specifies the IP address of one of the nodes. - ``{X}`` is the ring number. - - - ``nodeid`` is optional when using IPv4 and required when using IPv6. - This is a 32-bit value specifying the node identifier delivered to the - cluster membership service. If this is not specified with IPv4, - the node ID is determined from the 32-bit IP address of the system to which - the system is bound with ring identifier of 0. The node identifier value of - zero is reserved and should not be used. - - -.. _corosync-votequorum: - -Set up Corosync with votequorum library ---------------------------------------- - -The votequorum library is part of the Corosync project. It provides an -interface to the vote-based quorum service and it must be explicitly enabled -in the Corosync configuration file. 
The main role of votequorum library is to -avoid split-brain situations, but it also provides a mechanism to: - -- Query the quorum status - -- List the nodes known to the quorum service - -- Receive notifications of quorum state changes - -- Change the number of votes assigned to a node - -- Change the number of expected votes for a cluster to be quorate - -- Connect an additional quorum device to allow small clusters remain quorate - during node outages - -The votequorum library has been created to replace and eliminate ``qdisk``, the -disk-based quorum daemon for CMAN, from advanced cluster configurations. - -A sample votequorum service configuration in the :file:`corosync.conf` file is: - -.. code-block:: none - - quorum { - provider: corosync_votequorum (1) - expected_votes: 7 (2) - wait_for_all: 1 (3) - last_man_standing: 1 (4) - last_man_standing_window: 10000 (5) - } - -Note the following: - -- Specifying ``corosync_votequorum`` enables the votequorum library. - This is the only required option. - -- The cluster is fully operational with ``expected_votes`` set to 7 nodes - (each node has 1 vote), quorum: 4. If a list of nodes is specified as - ``nodelist``, the ``expected_votes`` value is ignored. - -- When you start up a cluster (all nodes down) and set ``wait_for_all`` to 1, - the cluster quorum is held until all nodes are online and have joined the - cluster for the first time. This parameter is new in Corosync 2.0. - -- Setting ``last_man_standing`` to 1 enables the Last Man Standing (LMS) - feature. By default, it is disabled (set to 0). - If a cluster is on the quorum edge (``expected_votes:`` set to 7; - ``online nodes:`` set to 4) for longer than the time specified - for the ``last_man_standing_window`` parameter, the cluster can recalculate - quorum and continue operating even if the next node will be lost. - This logic is repeated until the number of online nodes in the cluster - reaches 2. In order to allow the cluster to step down from 2 members to only - 1, the ``auto_tie_breaker`` parameter needs to be set. - We do not recommended this for production environments. - -- ``last_man_standing_window`` specifies the time, in milliseconds, - required to recalculate quorum after one or more hosts - have been lost from the cluster. To perform a new quorum recalculation, - the cluster must have quorum for at least the interval - specified for ``last_man_standing_window``. The default is 10000ms. - - -.. _pacemaker-corosync-start: - -Start Corosync --------------- - -Corosync is started as a regular system service. Depending on your -distribution, it may ship with an LSB init script, an upstart job, or -a Systemd unit file. - -- Start ``corosync`` with the LSB init script: - - .. code-block:: console - - # /etc/init.d/corosync start - - Alternatively: - - .. code-block:: console - - # service corosync start - -- Start ``corosync`` with upstart: - - .. code-block:: console - - # start corosync - -- Start ``corosync`` with systemd unit file: - - .. code-block:: console - - # systemctl start corosync - -You can now check the ``corosync`` connectivity with one of these tools. - -Use the :command:`corosync-cfgtool` utility with the ``-s`` option -to get a summary of the health of the communication rings: - -.. code-block:: console - - # corosync-cfgtool -s - Printing ring status. 
- Local node ID 435324542 - RING ID 0 - id = 10.0.0.82 - status = ring 0 active with no faults - RING ID 1 - id = 10.0.42.100 - status = ring 1 active with no faults - -Use the :command:`corosync-objctl` utility to dump the Corosync cluster -member list: - -.. note:: - - If you are using Corosync version 2, use the :command:`corosync-cmapctl` - utility instead of :command:`corosync-objctl`; it is a direct replacement. - -.. code-block:: console - - # corosync-objctl runtime.totem.pg.mrp.srp.members - runtime.totem.pg.mrp.srp.435324542.ip=r(0) ip(10.0.0.82) r(1) ip(10.0.42.100) - runtime.totem.pg.mrp.srp.435324542.join_count=1 - runtime.totem.pg.mrp.srp.435324542.status=joined - runtime.totem.pg.mrp.srp.983895584.ip=r(0) ip(10.0.0.87) r(1) ip(10.0.42.254) - runtime.totem.pg.mrp.srp.983895584.join_count=1 - runtime.totem.pg.mrp.srp.983895584.status=joined - -You should see a ``status=joined`` entry for each of your constituent -cluster nodes. - -.. _pacemaker-start: - -Start Pacemaker ---------------- - -After the ``corosync`` service have been started and you have verified that the -cluster is communicating properly, you can start :command:`pacemakerd`, the -Pacemaker master control process. Choose one from the following four ways to -start it: - -#. Start ``pacemaker`` with the LSB init script: - - .. code-block:: console - - # /etc/init.d/pacemaker start - - Alternatively: - - .. code-block:: console - - # service pacemaker start - -#. Start ``pacemaker`` with upstart: - - .. code-block:: console - - # start pacemaker - -#. Start ``pacemaker`` with the systemd unit file: - - .. code-block:: console - - # systemctl start pacemaker - -After the ``pacemaker`` service has started, Pacemaker creates a default empty -cluster configuration with no resources. Use the :command:`crm_mon` utility to -observe the status of ``pacemaker``: - -.. code-block:: console - - # crm_mon -1 - Last updated: Sun Oct 7 21:07:52 2012 - Last change: Sun Oct 7 20:46:00 2012 via cibadmin on controller2 - Stack: openais - Current DC: controller2 - partition with quorum - Version: 1.1.6-9971ebba4494012a93c03b40a2c58ec0eb60f50c - 3 Nodes configured, 3 expected votes - 0 Resources configured. - - - Online: [ controller3 controller2 controller1 ] - ... - -.. _pacemaker-cluster-properties: - -Set basic cluster properties -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -After you set up your Pacemaker cluster, set a few basic cluster properties: - -- ``crmsh`` - - .. code-block:: console - - $ crm configure property pe-warn-series-max="1000" \ - pe-input-series-max="1000" \ - pe-error-series-max="1000" \ - cluster-recheck-interval="5min" - -- ``pcs`` - - .. code-block:: console - - $ pcs property set pe-warn-series-max=1000 \ - pe-input-series-max=1000 \ - pe-error-series-max=1000 \ - cluster-recheck-interval=5min - -Note the following: - -- Setting the ``pe-warn-series-max``, ``pe-input-series-max``, - and ``pe-error-series-max`` parameters to 1000 - instructs Pacemaker to keep a longer history of the inputs processed - and errors and warnings generated by its Policy Engine. - This history is useful if you need to troubleshoot the cluster. - -- Pacemaker uses an event-driven approach to cluster state processing. - The ``cluster-recheck-interval`` parameter (which defaults to 15 minutes) - defines the interval at which certain Pacemaker actions occur. - It is usually prudent to reduce this to a shorter interval, - such as 5 or 3 minutes. 
- -By default, STONITH is enabled in Pacemaker, but STONITH mechanisms (to -shutdown a node via IPMI or ssh) are not configured. In this case Pacemaker -will refuse to start any resources. -For production cluster it is recommended to configure appropriate STONITH -mechanisms. But for demo or testing purposes STONITH can be disabled completely -as follows: - -- ``crmsh`` - - .. code-block:: console - - $ crm configure property stonith-enabled=false - -- ``pcs`` - - .. code-block:: console - - $ pcs property set stonith-enabled=false - -After you make these changes, commit the updated configuration. diff --git a/doc/ha-guide/source/controller-ha-telemetry.rst b/doc/ha-guide/source/controller-ha-telemetry.rst deleted file mode 100644 index 8347ffee24..0000000000 --- a/doc/ha-guide/source/controller-ha-telemetry.rst +++ /dev/null @@ -1,80 +0,0 @@ -========================== -Highly available Telemetry -========================== - -The `Telemetry service -`_ -provides a data collection service and an alarming service. - -Telemetry polling agent -~~~~~~~~~~~~~~~~~~~~~~~ - -The Telemetry polling agent can be configured to partition its polling -workload between multiple agents. This enables high availability (HA). - -Both the central and the compute agent can run in an HA deployment. -This means that multiple instances of these services can run in -parallel with workload partitioning among these running instances. - -The `Tooz `_ library provides -the coordination within the groups of service instances. -It provides an API above several back ends that can be used for building -distributed applications. - -Tooz supports -`various drivers `_ -including the following back end solutions: - -* `Zookeeper `_: - Recommended solution by the Tooz project. - -* `Redis `_: - Recommended solution by the Tooz project. - -* `Memcached `_: - Recommended for testing. - -You must configure a supported Tooz driver for the HA deployment of -the Telemetry services. - -For information about the required configuration options -to set in the :file:`ceilometer.conf`, see the `coordination section -`_ -in the OpenStack Configuration Reference. - -.. note:: - - Only one instance for the central and compute agent service(s) is able - to run and function correctly if the ``backend_url`` option is not set. - -The availability check of the instances is provided by heartbeat messages. -When the connection with an instance is lost, the workload will be -reassigned within the remaining instances in the next polling cycle. - -.. note:: - - Memcached uses a timeout value, which should always be set to - a value that is higher than the heartbeat value set for Telemetry. - -For backward compatibility and supporting existing deployments, the central -agent configuration supports using different configuration files. This is for -groups of service instances that are running in parallel. -For enabling this configuration, set a value for the -``partitioning_group_prefix`` option in the -`polling section `_ -in the OpenStack Configuration Reference. - -.. warning:: - - For each sub-group of the central agent pool with the same - ``partitioning_group_prefix``, a disjoint subset of meters must be polled - to avoid samples being missing or duplicated. The list of meters to poll - can be set in the :file:`/etc/ceilometer/pipeline.yaml` configuration file. - For more information about pipelines see the `Data processing and pipelines - `_ - section. 
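A minimal sketch of the coordination setting discussed above, assuming a Redis back end running on ``controller1`` (both the driver choice and the host name are illustrative assumptions): enabling workload partitioning for the Telemetry agents comes down to pointing the ``backend_url`` option at a supported Tooz driver URL in :file:`ceilometer.conf`:

.. code-block:: ini

   [coordination]
   # Tooz coordination back end used to partition the polling workload
   # among agents; any supported driver URL (Redis, Zookeeper, ...) works.
   backend_url = redis://controller1:6379

With this set, multiple polling agents join the same coordination group and share the meter list between them instead of each polling everything.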
- -To enable the compute agent to run multiple instances simultaneously with -workload partitioning, the ``workload_partitioning`` option must be set to -``True`` under the `compute section `_ -in the :file:`ceilometer.conf` configuration file. diff --git a/doc/ha-guide/source/controller-ha-vip.rst b/doc/ha-guide/source/controller-ha-vip.rst deleted file mode 100644 index 15104a7140..0000000000 --- a/doc/ha-guide/source/controller-ha-vip.rst +++ /dev/null @@ -1,23 +0,0 @@ -================= -Configure the VIP -================= - -You must select and assign a virtual IP address (VIP) that can freely float -between cluster nodes. - -This configuration creates ``vip``, a virtual IP address for use by the -API node (``10.0.0.11``). - -For ``crmsh``: - -.. code-block:: console - - # crm configure primitive vip ocf:heartbeat:IPaddr2 \ - params ip="10.0.0.11" cidr_netmask="24" op monitor interval="30s" - -For ``pcs``: - -.. code-block:: console - - # pcs resource create vip ocf:heartbeat:IPaddr2 \ - ip="10.0.0.11" cidr_netmask="24" op monitor interval="30s" diff --git a/doc/ha-guide/source/controller-ha.rst b/doc/ha-guide/source/controller-ha.rst deleted file mode 100644 index ff99bc1dfa..0000000000 --- a/doc/ha-guide/source/controller-ha.rst +++ /dev/null @@ -1,73 +0,0 @@ -========================== -Configuring the controller -========================== - -The cloud controller runs on the management network and must talk to -all other services. - -.. toctree:: - :maxdepth: 2 - - intro-ha-arch-pacemaker.rst - controller-ha-pacemaker.rst - controller-ha-vip.rst - controller-ha-haproxy.rst - controller-ha-memcached.rst - controller-ha-identity.rst - controller-ha-telemetry.rst - -Overview of highly available controllers -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -OpenStack is a set of services exposed to the end users -as HTTP(s) APIs. Additionally, for your own internal usage, OpenStack -requires an SQL database server and AMQP broker. The physical servers, -where all the components are running, are called controllers. -This modular OpenStack architecture allows you to duplicate all the -components and run them on different controllers. -By making all the components redundant, it is possible to make -OpenStack highly available. - -In general, we can divide all the OpenStack components into three categories: - -- OpenStack APIs: APIs that are HTTP(s) stateless services written in python, - easy to duplicate and mostly easy to load balance. - -- The SQL relational database server provides stateful type consumed by other - components. Supported databases are MySQL, MariaDB, and PostgreSQL. - Making the SQL database redundant is complex. - -- :term:`Advanced Message Queuing Protocol (AMQP)` provides OpenStack - internal stateful communication service. - -Common deployment architectures -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -We recommend two primary architectures for making OpenStack highly available. - -The architectures differ in the sets of services managed by the -cluster. - -Both use a cluster manager, such as Pacemaker or Veritas, to -orchestrate the actions of the various services across a set of -machines. Because we are focused on FOSS, we refer to these as -Pacemaker architectures. - -Traditionally, Pacemaker has been positioned as an all-encompassing -solution. However, as OpenStack services have matured, they are -increasingly able to run in an active/active configuration and -gracefully tolerate the disappearance of the APIs on which they -depend. 
- -With this in mind, some vendors are restricting Pacemaker's use to -services that must operate in an active/passive mode (such as -``cinder-volume``), those with multiple states (for example, Galera), and -those with complex bootstrapping procedures (such as RabbitMQ). - -The majority of services, needing no real orchestration, are handled -by systemd on each node. This approach avoids the need to coordinate -service upgrades or location changes with the cluster and has the -added advantage of more easily scaling beyond Corosync's 16 node -limit. However, it will generally require the addition of an -enterprise monitoring solution such as Nagios or Sensu for those -wanting centralized failure reporting. diff --git a/doc/ha-guide/source/environment-hardware.rst b/doc/ha-guide/source/environment-hardware.rst deleted file mode 100644 index 23a993f22e..0000000000 --- a/doc/ha-guide/source/environment-hardware.rst +++ /dev/null @@ -1,61 +0,0 @@ -============================================= -Hardware considerations for high availability -============================================= - -When you use high availability, consider the hardware requirements needed -for your application. - -Hardware setup -~~~~~~~~~~~~~~ - -The following are the standard hardware requirements: - -- Provider networks: See the *Overview -> Networking Option 1: Provider - networks* section of the - `Install Guides `_ - depending on your distribution. -- Self-service networks: See the *Overview -> Networking Option 2: - Self-service networks* section of the - `Install Guides `_ - depending on your distribution. - -OpenStack does not require a significant amount of resources and the following -minimum requirements should support a proof-of-concept high availability -environment with core services and several instances: - -+-------------------+------------------+----------+-----------+------+ -| Node type | Processor Cores | Memory | Storage | NIC | -+===================+==================+==========+===========+======+ -| controller node | 4 | 12 GB | 120 GB | 2 | -+-------------------+------------------+----------+-----------+------+ -| compute node | 8+ | 12+ GB | 120+ GB | 2 | -+-------------------+------------------+----------+-----------+------+ - -We recommended that the maximum latency between any two controller -nodes is 2 milliseconds. Although the cluster software can be tuned to -operate at higher latencies, some vendors insist on this value before -agreeing to support the installation. - -You can use the `ping` command to find the latency between two servers. - -Virtualized hardware -~~~~~~~~~~~~~~~~~~~~ - -For demonstrations and studying, you can set up a test environment on virtual -machines (VMs). This has the following benefits: - -- One physical server can support multiple nodes, - each of which supports almost any number of network interfaces. - -- You can take periodic snap shots throughout the installation process - and roll back to a working configuration in the event of a problem. - -However, running an OpenStack environment on VMs degrades the performance of -your instances, particularly if your hypervisor or processor lacks -support for hardware acceleration of nested VMs. - -.. note:: - - When installing highly available OpenStack on VMs, - be sure that your hypervisor permits promiscuous mode - and disables MAC address filtering on the external network. 
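A quick way to check the 2 millisecond latency recommendation above is to ping one controller node from another and read the average round-trip time (the host name below is an example):

.. code-block:: console

   $ ping -c 10 controller2

The ``avg`` field in the ``rtt min/avg/max/mdev`` summary line should stay well below 2 milliseconds between any two controller nodes.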
diff --git a/doc/ha-guide/source/environment-memcached.rst b/doc/ha-guide/source/environment-memcached.rst deleted file mode 100644 index cdf01a6ce1..0000000000 --- a/doc/ha-guide/source/environment-memcached.rst +++ /dev/null @@ -1,32 +0,0 @@ -==================== -Installing Memcached -==================== - -Most OpenStack services can use Memcached to store ephemeral data such as -tokens. Although Memcached does not support typical forms of redundancy such -as clustering, OpenStack services can use almost any number of instances -by configuring multiple hostnames or IP addresses. - -The Memcached client implements hashing to balance objects among the instances. -Failure of an instance impacts only a percentage of the objects, -and the client automatically removes it from the list of instances. - -To install and configure Memcached, read the -`official documentation `_. - -Memory caching is managed by `oslo.cache -`_. -This ensures consistency across all projects when using multiple Memcached -servers. The following is an example configuration with three hosts: - -.. code-block:: ini - - memcached_servers = controller1:11211,controller2:11211,controller3:11211 - -By default, ``controller1`` handles the caching service. If the host goes down, -``controller2`` or ``controller3`` takes over the service. - -For more information about Memcached installation, see the -*Environment -> Memcached* section in the -`Installation Guides `_ -depending on your distribution. diff --git a/doc/ha-guide/source/environment-ntp.rst b/doc/ha-guide/source/environment-ntp.rst deleted file mode 100644 index 705986941e..0000000000 --- a/doc/ha-guide/source/environment-ntp.rst +++ /dev/null @@ -1,10 +0,0 @@ -============= -Configure NTP -============= - -You must configure NTP to properly synchronize services among nodes. -We recommend that you configure the controller node to reference -more accurate (lower stratum) servers and other nodes to reference -the controller node. For more information, see the -`Installation Guides -`_. diff --git a/doc/ha-guide/source/environment-operatingsystem.rst b/doc/ha-guide/source/environment-operatingsystem.rst deleted file mode 100644 index cada295540..0000000000 --- a/doc/ha-guide/source/environment-operatingsystem.rst +++ /dev/null @@ -1,29 +0,0 @@ -=============================== -Installing the operating system -=============================== - -The first step in setting up your highly available OpenStack cluster -is to install the operating system on each node. -Follow the instructions in the *Environment* section of the -`Installation Guides `_ -depending on your distribution. - -The OpenStack Installation Guides also include a list of -the services that use passwords, with important notes about using -them. - -.. note:: - - Before following this guide to configure the highly available - OpenStack cluster, ensure the IP ``10.0.0.11`` and hostname - ``controller`` are not in use. - -This guide uses the following example IP addresses: - -.. 
code-block:: none - - # controller - 10.0.0.11 controller # virtual IP - 10.0.0.12 controller1 - 10.0.0.13 controller2 - 10.0.0.14 controller3 diff --git a/doc/ha-guide/source/environment.rst b/doc/ha-guide/source/environment.rst deleted file mode 100644 index 1f363afd78..0000000000 --- a/doc/ha-guide/source/environment.rst +++ /dev/null @@ -1,14 +0,0 @@ -================================= -Configuring the basic environment -================================= - -This chapter describes the basic environment for high availability, -such as hardware, operating system, common services. - -.. toctree:: - :maxdepth: 2 - - environment-hardware.rst - environment-operatingsystem.rst - environment-ntp.rst - environment-memcached.rst diff --git a/doc/ha-guide/source/figures/Cluster-deployment-collapsed.png b/doc/ha-guide/source/figures/Cluster-deployment-collapsed.png deleted file mode 100644 index 91feec0bb1..0000000000 Binary files a/doc/ha-guide/source/figures/Cluster-deployment-collapsed.png and /dev/null differ diff --git a/doc/ha-guide/source/figures/Cluster-deployment-segregated.png b/doc/ha-guide/source/figures/Cluster-deployment-segregated.png deleted file mode 100644 index a504ae18aa..0000000000 Binary files a/doc/ha-guide/source/figures/Cluster-deployment-segregated.png and /dev/null differ diff --git a/doc/ha-guide/source/index.rst b/doc/ha-guide/source/index.rst deleted file mode 100644 index 3a8272f780..0000000000 --- a/doc/ha-guide/source/index.rst +++ /dev/null @@ -1,50 +0,0 @@ -================================= -OpenStack High Availability Guide -================================= - -Abstract -~~~~~~~~ - -This guide describes how to install and configure OpenStack for high -availability. It supplements the Installation Guides -and assumes that you are familiar with the material in those guides. - -.. important:: - - This guide was last updated as of the Ocata release, documenting - the OpenStack Ocata, Newton, and Mitaka releases. It may - not apply to EOL releases Kilo and Liberty. - - We advise that you read this at your own discretion when planning - on your OpenStack cloud. - - This guide is intended as advice only. - - The OpenStack HA team is based on voluntary contributions from - the OpenStack community. You can contact the HA community - directly in the #openstack-ha channel on Freenode IRC, or by - sending mail to the openstack-dev mailing list with the [HA] prefix in - the subject header. - - The OpenStack HA community used to hold `weekly IRC meetings - `_ to discuss - a range of topics relating to HA in OpenStack. The - `logs of all past meetings - `_ are still available to - read. - -Contents -~~~~~~~~ - -.. toctree:: - :maxdepth: 2 - - common/conventions.rst - intro-ha.rst - environment.rst - shared-services.rst - controller-ha.rst - networking-ha.rst - storage-ha.rst - compute-node-ha.rst - appendix.rst diff --git a/doc/ha-guide/source/intro-ha-arch-pacemaker.rst b/doc/ha-guide/source/intro-ha-arch-pacemaker.rst deleted file mode 100644 index afa231cbb8..0000000000 --- a/doc/ha-guide/source/intro-ha-arch-pacemaker.rst +++ /dev/null @@ -1,190 +0,0 @@ -========================== -The Pacemaker architecture -========================== - -What is a cluster manager? -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -At its core, a cluster is a distributed finite state machine capable -of co-ordinating the startup and recovery of inter-related services -across a set of machines. 
- -Even a distributed or replicated application that is able to survive failures -on one or more machines can benefit from a cluster manager because a cluster -manager has the following capabilities: - -#. Awareness of other applications in the stack - - While SYS-V init replacements like systemd can provide - deterministic recovery of a complex stack of services, the - recovery is limited to one machine and lacks the context of what - is happening on other machines. This context is crucial to - determine the difference between a local failure, and clean startup - and recovery after a total site failure. - -#. Awareness of instances on other machines - - Services like RabbitMQ and Galera have complicated boot-up - sequences that require co-ordination, and often serialization, of - startup operations across all machines in the cluster. This is - especially true after a site-wide failure or shutdown where you must - first determine the last machine to be active. - -#. A shared implementation and calculation of `quorum - `_ - - It is very important that all members of the system share the same - view of who their peers are and whether or not they are in the - majority. Failure to do this leads very quickly to an internal - `split-brain `_ - state. This is where different parts of the system are pulling in - different and incompatible directions. - -#. Data integrity through fencing (a non-responsive process does not - imply it is not doing anything) - - A single application does not have sufficient context to know the - difference between failure of a machine and failure of the - application on a machine. The usual practice is to assume the - machine is dead and continue working, however this is highly risky. A - rogue process or machine could still be responding to requests and - generally causing havoc. The safer approach is to make use of - remotely accessible power switches and/or network switches and SAN - controllers to fence (isolate) the machine before continuing. - -#. Automated recovery of failed instances - - While the application can still run after the failure of several - instances, it may not have sufficient capacity to serve the - required volume of requests. A cluster can automatically recover - failed instances to prevent additional load induced failures. - -For these reasons, we highly recommend the use of a cluster manager like -`Pacemaker `_. - -Deployment flavors -~~~~~~~~~~~~~~~~~~ - -It is possible to deploy three different flavors of the Pacemaker -architecture. The two extremes are ``Collapsed`` (where every -component runs on every node) and ``Segregated`` (where every -component runs in its own 3+ node cluster). - -Regardless of which flavor you choose, we recommend that -clusters contain at least three nodes so that you can take advantage of -`quorum `_. - -Quorum becomes important when a failure causes the cluster to split in -two or more partitions. In this situation, you want the majority members of -the system to ensure the minority are truly dead (through fencing) and continue -to host resources. For a two-node cluster, no side has the majority and -you can end up in a situation where both sides fence each other, or -both sides are running the same services. This can lead to data corruption. - -Clusters with an even number of hosts suffer from similar issues. A -single network failure could easily cause a N:N split where neither -side retains a majority. For this reason, we recommend an odd number -of cluster members when scaling up. 
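To make the quorum recommendation concrete, the following is a minimal sketch, assuming a three-node cluster using the votequorum provider, of the ``quorum`` stanza such a cluster might carry in its :file:`corosync.conf`. Treat the values as illustrative rather than a definitive configuration:

.. code-block:: ini

   quorum {
       # corosync_votequorum counts one vote per member by default;
       # with three expected votes, any two reachable members keep
       # the partition quorate after a single node failure.
       provider: corosync_votequorum
       expected_votes: 3
   }

With this arrangement, a single failed or isolated node can be fenced by the surviving pair, rather than the other way around.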
- -You can have up to 16 cluster members (this is currently limited by -the ability of corosync to scale higher). In extreme cases, 32 and -even up to 64 nodes could be possible. However, this is not well tested. - -Collapsed ---------- - -In a collapsed configuration, there is a single cluster of 3 or more -nodes on which every component is running. - -This scenario has the advantage of requiring far fewer, if more -powerful, machines. Additionally, being part of a single cluster -allows you to accurately model the ordering dependencies between -components. - -This scenario can be visualized as below. - -.. image:: /figures/Cluster-deployment-collapsed.png - :width: 100% - -You would choose this option if you prefer to have fewer but more -powerful boxes. - -This is the most common option and the one we document here. - -Segregated ----------- - -In this configuration, each service runs in a dedicated cluster of -3 or more nodes. - -The benefits of this approach are the physical isolation between -components and the ability to add capacity to specific components. - -You would choose this option if you prefer to have more but -less powerful boxes. - -This scenario can be visualized as below, where each box below -represents a cluster of three or more guests. - -.. image:: /figures/Cluster-deployment-segregated.png - :width: 100% - -Mixed ------ - -It is also possible to follow a segregated approach for one or more -components that are expected to be a bottleneck and use a collapsed -approach for the remainder. - -Proxy server -~~~~~~~~~~~~ - -Almost all services in this stack benefit from being proxied. -Using a proxy server provides the following capabilities: - -#. Load distribution - - Many services can act in an active/active capacity, however, they - usually require an external mechanism for distributing requests to - one of the available instances. The proxy server can serve this - role. - -#. API isolation - - By sending all API access through the proxy, you can clearly - identify service interdependencies. You can also move them to - locations other than ``localhost`` to increase capacity if the - need arises. - -#. Simplified process for adding/removing of nodes - - Since all API access is directed to the proxy, adding or removing - nodes has no impact on the configuration of other services. This - can be very useful in upgrade scenarios where an entirely new set - of machines can be configured and tested in isolation before - telling the proxy to direct traffic there instead. - -#. Enhanced failure detection - - The proxy can be configured as a secondary mechanism for detecting - service failures. It can even be configured to look for nodes in - a degraded state (such as being too far behind in the - replication) and take them out of circulation. - -The following components are currently unable to benefit from the use -of a proxy server: - -* RabbitMQ -* Memcached -* MongoDB - -We recommend HAProxy as the load balancer, however, there are many alternative -load balancing solutions in the marketplace. - -Generally, we use round-robin to distribute load amongst instances of -active/active services. Alternatively, Galera uses ``stick-table`` options -to ensure that incoming connection to virtual IP (VIP) are directed to only one -of the available back ends. This helps avoid lock contention and prevent -deadlocks, although Galera can run active/active. 
Used in combination with -the ``httpchk`` option, this ensures that only nodes that are in sync with their -peers are allowed to handle requests. diff --git a/doc/ha-guide/source/intro-ha.rst b/doc/ha-guide/source/intro-ha.rst deleted file mode 100644 index fb27dbe734..0000000000 --- a/doc/ha-guide/source/intro-ha.rst +++ /dev/null @@ -1,208 +0,0 @@ -=========================================== -Introduction to OpenStack high availability -=========================================== - -High availability systems seek to minimize the following issues: - -#. System downtime: Occurs when a user-facing service is unavailable - beyond a specified maximum amount of time. - -#. Data loss: Accidental deletion or destruction of data. - -Most high availability systems guarantee protection against system downtime -and data loss only in the event of a single failure. -However, they are also expected to protect against cascading failures, -where a single failure deteriorates into a series of consequential failures. -Many service providers guarantee a :term:`Service Level Agreement (SLA)` -that includes an uptime percentage for the computing service, calculated from -the available time and system downtime, excluding planned outage time. - -Redundancy and failover -~~~~~~~~~~~~~~~~~~~~~~~ - -High availability is implemented with redundant hardware -running redundant instances of each service. -If one piece of hardware running one instance of a service fails, -the system can then fail over to another instance of the service -that is running on hardware that did not fail. - -A crucial aspect of high availability -is the elimination of single points of failure (SPOFs). -A SPOF is an individual piece of equipment or software -that causes system downtime or data loss if it fails. -In order to eliminate SPOFs, check that mechanisms exist for redundancy of: - -- Network components, such as switches and routers - -- Applications and automatic service migration - -- Storage components - -- Facility services such as power, air conditioning, and fire protection - -In the event that a component fails and a back-up system must take on -its load, most high availability systems will replace the failed -component as quickly as possible to maintain necessary redundancy. This -way, the time spent in a degraded protection state is minimized. - -Most high availability systems fail in the event of multiple -independent (non-consequential) failures. In this case, most -implementations favor protecting data over maintaining availability. - -High availability systems typically achieve an uptime percentage of -99.99% or more, which roughly equates to less than an hour of -cumulative downtime per year. In order to achieve this, high -availability systems should keep recovery times after a failure to -about one to two minutes, sometimes significantly less. - -OpenStack currently meets such availability requirements for its own -infrastructure services, meaning that an uptime of 99.99% is feasible -for the OpenStack infrastructure proper. However, OpenStack does not -guarantee 99.99% availability for individual guest instances. - -This document discusses some common methods of implementing highly -available systems, with an emphasis on the core OpenStack services and -other open source services that are closely aligned with OpenStack. - -You will need to address high availability concerns for any application -software that you run on your OpenStack environment.
The important thing is -to make sure that your services are redundant and available. -How you achieve that is up to you. - -Stateless versus stateful services -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following are the definitions of stateless and stateful services: - -Stateless service - A service that provides a response after your request - and then requires no further attention. - To make a stateless service highly available, - you need to provide redundant instances and load balance them. - OpenStack services that are stateless include ``nova-api``, - ``nova-conductor``, ``glance-api``, ``keystone-api``, - ``neutron-api``, and ``nova-scheduler``. - -Stateful service - A service where subsequent requests to the service - depend on the results of the first request. - Stateful services are more difficult to manage because a single - action typically involves more than one request. Providing - additional instances and load balancing does not solve the problem. - For example, if the horizon user interface reset itself every time - you went to a new page, it would not be very useful. - OpenStack services that are stateful include the OpenStack database - and message queue. - Making stateful services highly available can depend on whether you choose - an active/passive or active/active configuration. - -Active/passive versus active/active -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Stateful services can be configured as active/passive or active/active, -which are defined as follows: - -:term:`active/passive configuration` - Maintains a redundant instance - that can be brought online when the active service fails. - For example, OpenStack writes to the main database - while maintaining a disaster recovery database that can be brought online - if the main database fails. - - A typical active/passive installation for a stateful service maintains - a replacement resource that can be brought online when required. - Requests are handled using a :term:`virtual IP address (VIP)` that - facilitates returning to service with minimal reconfiguration. - A separate application (such as Pacemaker or Corosync) monitors - these services, bringing the backup online as necessary. - -:term:`active/active configuration` - Each service also has a backup but manages both the main and - redundant systems concurrently. - This way, if there is a failure, the user is unlikely to notice. - The backup system is already online and takes on increased load - while the main system is fixed and brought back online. - - Typically, an active/active installation for a stateless service - maintains a redundant instance, and requests are load balanced using - a virtual IP address and a load balancer such as HAProxy. - - A typical active/active installation for a stateful service includes - redundant services, with all instances having an identical state. In - other words, updates to one instance of a database update all other - instances. This way a request to one instance is the same as a - request to any other. A load balancer manages the traffic to these - systems, ensuring that operational systems always handle the - request. - -Clusters and quorums -~~~~~~~~~~~~~~~~~~~~ - -The quorum specifies the minimal number of nodes -that must be functional in a cluster of redundant nodes -in order for the cluster to remain functional. -When one node fails and failover transfers control to other nodes, -the system must ensure that data and processes remain sane. 
-To determine this, the contents of the remaining nodes are compared -and, if there are discrepancies, a majority-rules algorithm is applied. - -For this reason, each cluster in a high availability environment should -have an odd number of nodes, and the quorum is defined as more than half -of the nodes. -If multiple nodes fail so that the cluster size falls below the quorum -value, the cluster itself fails. - -For example, in a seven-node cluster, the quorum should be set to -``floor(7/2) + 1 == 4``. If quorum is four and four nodes fail simultaneously, -the cluster itself would fail, whereas it would continue to function if -no more than three nodes fail. If split into partitions of three and four nodes -respectively, the quorum of four nodes would keep the majority -partition operating and stop or fence the minority one (depending on the -no-quorum-policy cluster configuration). - -As a configuration example only, the quorum could also have been set to three. - -.. note:: - - We do not recommend setting the quorum to a value less than ``floor(n/2) + 1`` - as it would likely cause a split-brain in the face of network partitions. - -With a quorum of three, the cluster would still continue to function if four -nodes failed simultaneously. However, if split into partitions of three and four nodes -respectively, a quorum of three would allow both sides to attempt to fence the other and -host resources. Without fencing enabled, this would go straight to running -two copies of each resource. - -This is why setting the quorum to a value less than ``floor(n/2) + 1`` is -dangerous. However, it may be required in some specific cases, such as a -temporary measure at a point when it is known with 100% certainty that the other -nodes are down. - -When configuring an OpenStack environment for study or demonstration purposes, -it is possible to turn off quorum checking. Production systems should -always run with quorum enabled. - - -Single-controller high availability mode -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -OpenStack supports a single-controller high availability mode -that is managed by the services that manage highly available environments -but is not actually highly available because -no redundant controllers are configured to use for failover. -This environment can be used for study and demonstration -but is not appropriate for a production environment. - -It is possible to add controllers to such an environment -to convert it into a truly highly available environment. - -High availability is not for every user. It presents some challenges. -High availability may be too complex for databases or -systems with large amounts of data. Replication can slow large systems -down. Different setups have different prerequisites. Read the guidelines -for each setup. - -.. important:: - - High availability is turned off as the default in OpenStack setups. diff --git a/doc/ha-guide/source/locale/en_GB/LC_MESSAGES/ha-guide.po b/doc/ha-guide/source/locale/en_GB/LC_MESSAGES/ha-guide.po deleted file mode 100644 index e7a0663e1a..0000000000 --- a/doc/ha-guide/source/locale/en_GB/LC_MESSAGES/ha-guide.po +++ /dev/null @@ -1,4161 +0,0 @@ -# Andi Chandler , 2017. #zanata -# Andi Chandler , 2018.
#zanata -msgid "" -msgstr "" -"Project-Id-Version: openstackhaguide\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2018-08-22 22:08+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2018-08-22 02:44+0000\n" -"Last-Translator: Andi Chandler \n" -"Language-Team: English (United Kingdom)\n" -"Language: en_GB\n" -"X-Generator: Zanata 4.3.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -msgid "**Cluster Address**: List the IP addresses for each cluster node." -msgstr "**Cluster Address**: List the IP addresses for each cluster node." - -msgid "**Cluster Name**: Define an arbitrary name for your cluster." -msgstr "**Cluster Name**: Define an arbitrary name for your cluster." - -msgid "" -"**Corosync configuration file fragment for unicast (``corosync.conf``)**" -msgstr "" -"**Corosync configuration file fragment for unicast (``corosync.conf``)**" - -msgid "" -"**Example Corosync configuration file for multicast (``corosync.conf``)**" -msgstr "" -"**Example Corosync configuration file for multicast (``corosync.conf``)**" - -msgid "**Node Address**: Define the IP address of the cluster node." -msgstr "**Node Address**: Define the IP address of the cluster node." - -msgid "**Node Name**: Define the logical name of the cluster node." -msgstr "**Node Name**: Define the logical name of the cluster node." - -msgid "" -"**wsrep Provider**: The Galera Replication Plugin serves as the ``wsrep`` " -"provider for Galera Cluster. It is installed on your system as the " -"``libgalera_smm.so`` file. Define the path to this file in your ``my.cnf``:" -msgstr "" -"**wsrep Provider**: The Galera Replication Plugin serves as the ``wsrep`` " -"provider for Galera Cluster. It is installed on your system as the " -"``libgalera_smm.so`` file. 
Define the path to this file in your ``my.cnf``:" - -msgid "/etc/neutron/neutron.conf parameters for high availability" -msgstr "/etc/neutron/neutron.conf parameters for High Availability" - -msgid "12 GB" -msgstr "12 GB" - -msgid "12+ GB" -msgstr "12+ GB" - -msgid "120 GB" -msgstr "120 GB" - -msgid "120+ GB" -msgstr "120+ GB" - -msgid "2" -msgstr "2" - -msgid "2 or more" -msgstr "2 or more" - -msgid "4" -msgstr "4" - -msgid "8+" -msgstr "8+" - -msgid ":doc:`Networking DHCP agent`" -msgstr ":doc:`Networking DHCP agent`" - -msgid ":doc:`Neutron L3 agent`" -msgstr ":doc:`Neutron L3 agent`" - -msgid "" -":ref:`Configure OpenStack services to use RabbitMQ HA queues `" -msgstr "" -":ref:`Configure OpenStack services to use RabbitMQ HA queues `" - -msgid ":ref:`Configure RabbitMQ for HA queues`" -msgstr ":ref:`Configure RabbitMQ for HA queues`" - -msgid ":ref:`Install RabbitMQ`" -msgstr ":ref:`Install RabbitMQ`" - -msgid ":ref:`corosync-multicast`" -msgstr ":ref:`corosync-multicast`" - -msgid ":ref:`corosync-unicast`" -msgstr ":ref:`corosync-unicast`" - -msgid ":ref:`corosync-votequorum`" -msgstr ":ref:`corosync-votequorum`" - -msgid ":ref:`glance-api-configure`" -msgstr ":ref:`glance-api-configure`" - -msgid ":ref:`glance-api-pacemaker`" -msgstr ":ref:`glance-api-pacemaker`" - -msgid ":ref:`glance-services`" -msgstr ":ref:`glance-services`" - -msgid ":ref:`ha-blockstorage-configure`" -msgstr ":ref:`ha-blockstorage-configure`" - -msgid ":ref:`ha-blockstorage-pacemaker`" -msgstr ":ref:`ha-blockstorage-pacemaker`" - -msgid ":ref:`ha-blockstorage-services`" -msgstr ":ref:`ha-blockstorage-services`" - -msgid ":ref:`ha-sharedfilesystems-configure`" -msgstr ":ref:`ha-sharedfilesystems-configure`" - -msgid ":ref:`ha-sharedfilesystems-pacemaker`" -msgstr ":ref:`ha-sharedfilesystems-pacemaker`" - -msgid ":ref:`ha-sharedfilesystems-services`" -msgstr ":ref:`ha-sharedfilesystems-services`" - -msgid ":ref:`identity-config-identity`" -msgstr ":ref:`identity-config-identity`" - -msgid ":ref:`identity-pacemaker`" -msgstr ":ref:`identity-pacemaker`" - -msgid ":ref:`identity-services-config`" -msgstr ":ref:`identity-services-config`" - -msgid ":ref:`pacemaker-cluster-properties`" -msgstr ":ref:`pacemaker-cluster-properties`" - -msgid ":ref:`pacemaker-corosync-setup`" -msgstr ":ref:`pacemaker-corosync-setup`" - -msgid ":ref:`pacemaker-corosync-start`" -msgstr ":ref:`pacemaker-corosync-start`" - -msgid ":ref:`pacemaker-install`" -msgstr ":ref:`pacemaker-install`" - -msgid ":ref:`pacemaker-start`" -msgstr ":ref:`pacemaker-start`" - -msgid "" -":term:`Advanced Message Queuing Protocol (AMQP)` provides OpenStack internal " -"stateful communication service." -msgstr "" -":term:`Advanced Message Queuing Protocol (AMQP)` provides OpenStack internal " -"stateful communication service." - -msgid ":term:`active/active configuration`" -msgstr ":term:`active/active configuration`" - -msgid ":term:`active/passive configuration`" -msgstr ":term:`active/passive configuration`" - -msgid "" -"A crucial aspect of high availability is the elimination of single points of " -"failure (SPOFs). A SPOF is an individual piece of equipment or software that " -"causes system downtime or data loss if it fails. In order to eliminate " -"SPOFs, check that mechanisms exist for redundancy of:" -msgstr "" -"A crucial aspect of high availability is the elimination of single points of " -"failure (SPOFs). A SPOF is an individual piece of equipment or software that " -"causes system downtime or data loss if it fails. 
In order to eliminate " -"SPOFs, check that mechanisms exist for redundancy of:" - -msgid "A minimum of three hosts" -msgstr "A minimum of three hosts" - -msgid "" -"A sample votequorum service configuration in the :file:`corosync.conf` file " -"is:" -msgstr "" -"A sample votequorum service configuration in the :file:`corosync.conf` file " -"is:" - -msgid "" -"A service that provides a response after your request and then requires no " -"further attention. To make a stateless service highly available, you need to " -"provide redundant instances and load balance them. OpenStack services that " -"are stateless include ``nova-api``, ``nova-conductor``, ``glance-api``, " -"``keystone-api``, ``neutron-api``, and ``nova-scheduler``." -msgstr "" -"A service that provides a response after your request and then requires no " -"further attention. To make a stateless service highly available, you need to " -"provide redundant instances and load balance them. OpenStack services that " -"are stateless include ``nova-api``, ``nova-conductor``, ``glance-api``, " -"``keystone-api``, ``neutron-api``, and ``nova-scheduler``." - -msgid "" -"A service where subsequent requests to the service depend on the results of " -"the first request. Stateful services are more difficult to manage because a " -"single action typically involves more than one request. Providing additional " -"instances and load balancing does not solve the problem. For example, if the " -"horizon user interface reset itself every time you went to a new page, it " -"would not be very useful. OpenStack services that are stateful include the " -"OpenStack database and message queue. Making stateful services highly " -"available can depend on whether you choose an active/passive or active/" -"active configuration." -msgstr "" -"A service where subsequent requests to the service depend on the results of " -"the first request. Stateful services are more difficult to manage because a " -"single action typically involves more than one request. Providing additional " -"instances and load balancing does not solve the problem. For example, if the " -"Horizon user interface reset itself every time you went to a new page, it " -"would not be very useful. OpenStack services that are stateful include the " -"OpenStack database and message queue. Making stateful services highly " -"available can depend on whether you choose an active/passive or active/" -"active configuration." - -msgid "" -"A shared implementation and calculation of `quorum `_" -msgstr "" -"A shared implementation and calculation of `quorum `_" - -msgid "" -"A single application does not have sufficient context to know the difference " -"between failure of a machine and failure of the application on a machine. " -"The usual practice is to assume the machine is dead and continue working, " -"however this is highly risky. A rogue process or machine could still be " -"responding to requests and generally causing havoc. The safer approach is to " -"make use of remotely accessible power switches and/or network switches and " -"SAN controllers to fence (isolate) the machine before continuing." -msgstr "" -"A single application does not have sufficient context to know the difference " -"between failure of a machine and failure of the application on a machine. " -"The usual practice is to assume the machine is dead and continue working, " -"however this is highly risky. A rogue process or machine could still be " -"responding to requests and generally causing havoc. 
The safer approach is to " -"make use of remotely accessible power switches and/or network switches and " -"SAN controllers to fence (isolate) the machine before continuing." - -msgid "" -"A typical active/active installation for a stateful service includes " -"redundant services, with all instances having an identical state. In other " -"words, updates to one instance of a database update all other instances. " -"This way a request to one instance is the same as a request to any other. A " -"load balancer manages the traffic to these systems, ensuring that " -"operational systems always handle the request." -msgstr "" -"A typical active/active installation for a stateful service includes " -"redundant services, with all instances having an identical state. In other " -"words, updates to one instance of a database update all other instances. " -"This way a request to one instance is the same as a request to any other. A " -"load balancer manages the traffic to these systems, ensuring that " -"operational systems always handle the request." - -msgid "" -"A typical active/passive installation for a stateful service maintains a " -"replacement resource that can be brought online when required. Requests are " -"handled using a :term:`virtual IP address (VIP)` that facilitates returning " -"to service with minimal reconfiguration. A separate application (such as " -"Pacemaker or Corosync) monitors these services, bringing the backup online " -"as necessary." -msgstr "" -"A typical active/passive installation for a stateful service maintains a " -"replacement resource that can be brought online when required. Requests are " -"handled using a :term:`virtual IP address (VIP)` that facilitates returning " -"to service with minimal reconfiguration. A separate application (such as " -"Pacemaker or Corosync) monitors these services, bringing the backup online " -"as necessary." - -msgid "API isolation" -msgstr "API isolation" - -msgid "Abstract" -msgstr "Abstract" - -msgid "" -"Access to Memcached is not handled by HAProxy because replicated access is " -"currently in an experimental state. Instead, OpenStack services must be " -"supplied with the full list of hosts running Memcached." -msgstr "" -"Access to Memcached is not handled by HAProxy because replicated access is " -"currently in an experimental state. Instead, OpenStack services must be " -"supplied with the full list of hosts running Memcached." - -msgid "" -"Access to RabbitMQ is not normally handled by HAProxy. Instead, consumers " -"must be supplied with the full list of hosts running RabbitMQ with " -"``rabbit_hosts`` and turn on the ``rabbit_ha_queues`` option. For more " -"information, read the `core issue `_. For more detail, read the `history and " -"solution `_." -msgstr "" -"Access to RabbitMQ is not normally handled by HAProxy. Instead, consumers " -"must be supplied with the full list of hosts running RabbitMQ with " -"``rabbit_hosts`` and turn on the ``rabbit_ha_queues`` option. For more " -"information, read the `core issue `_. For more detail, read the `history and " -"solution `_." 
- -msgid "Active/passive versus active/active" -msgstr "Active/passive versus active/active" - -msgid "Add Block Storage API resource to Pacemaker" -msgstr "Add Block Storage API resource to Pacemaker" - -msgid "" -"Add HAProxy to the cluster and ensure the VIPs can only run on machines " -"where HAProxy is active:" -msgstr "" -"Add HAProxy to the cluster and ensure the VIPs can only run on machines " -"where HAProxy is active:" - -msgid "Add OpenStack Identity resource to Pacemaker" -msgstr "Add OpenStack Identity resource to Pacemaker" - -msgid "Add OpenStack Image API resource to Pacemaker" -msgstr "Add OpenStack Image API resource to Pacemaker" - -msgid "Add Shared File Systems API resource to Pacemaker" -msgstr "Add Shared File Systems API resource to Pacemaker" - -msgid "" -"Add the Pacemaker configuration for the OpenStack Identity resource by " -"running the following command to connect to the Pacemaker cluster:" -msgstr "" -"Add the Pacemaker configuration for the OpenStack Identity resource by " -"running the following command to connect to the Pacemaker cluster:" - -msgid "" -"Add the Pacemaker configuration for the OpenStack Image API resource. Use " -"the following command to connect to the Pacemaker cluster:" -msgstr "" -"Add the Pacemaker configuration for the OpenStack Image API resource. Use " -"the following command to connect to the Pacemaker cluster:" - -msgid "" -"Add the Pacemaker configuration for the Shared File Systems API resource. " -"Connect to the Pacemaker cluster with the following command:" -msgstr "" -"Add the Pacemaker configuration for the Shared File Systems API resource. " -"Connect to the Pacemaker cluster with the following command:" - -msgid "Add the following cluster resources:" -msgstr "Add the following cluster resources:" - -msgid "Additional parameters" -msgstr "Additional parameters" - -msgid "" -"After installing the Corosync package, you must create the :file:`/etc/" -"corosync/corosync.conf` configuration file." -msgstr "" -"After installing the Corosync package, you must create the :file:`/etc/" -"corosync/corosync.conf` configuration file." - -msgid "" -"After the ``corosync`` service have been started and you have verified that " -"the cluster is communicating properly, you can start :command:`pacemakerd`, " -"the Pacemaker master control process. Choose one from the following four " -"ways to start it:" -msgstr "" -"After the ``corosync`` service have been started and you have verified that " -"the cluster is communicating properly, you can start :command:`pacemakerd`, " -"the Pacemaker master control process. Choose one from the following four " -"ways to start it:" - -msgid "" -"After the ``pacemaker`` service has started, Pacemaker creates a default " -"empty cluster configuration with no resources. Use the :command:`crm_mon` " -"utility to observe the status of ``pacemaker``:" -msgstr "" -"After the ``pacemaker`` service has started, Pacemaker creates a default " -"empty cluster configuration with no resources. Use the :command:`crm_mon` " -"utility to observe the status of ``pacemaker``:" - -msgid "After you make these changes, commit the updated configuration." -msgstr "After you make these changes, commit the updated configuration." - -msgid "" -"After you set up your Pacemaker cluster, set a few basic cluster properties:" -msgstr "" -"After you set up your Pacemaker cluster, set a few basic cluster properties:" - -msgid "All routers are highly available by default." -msgstr "All routers are Highly Available by default." 
- -msgid "" -"Almost all services in this stack benefit from being proxied. Using a proxy " -"server provides the following capabilities:" -msgstr "" -"Almost all services in this stack benefit from being proxied. Using a proxy " -"server provides the following capabilities:" - -msgid "" -"Alternatively, if the database server is running, use the " -"``wsrep_last_committed`` status variable:" -msgstr "" -"Alternatively, if the database server is running, use the " -"``wsrep_last_committed`` status variable:" - -msgid "" -"Alternatively, instead of using systemd agents, download and install the OCF " -"resource agent:" -msgstr "" -"Alternatively, instead of using systemd agents, download and install the OCF " -"resource agent:" - -msgid "" -"Alternatively, make modifications using the ``firewall-cmd`` utility for " -"FirewallD that is available on many Linux distributions:" -msgstr "" -"Alternatively, make modifications using the ``firewall-cmd`` utility for " -"FirewallD that is available on many Linux distributions:" - -msgid "" -"Alternatively, you can use a commercial load balancer, which is hardware or " -"software. We recommend a hardware load balancer as it generally has good " -"performance." -msgstr "" -"Alternatively, you can use a commercial load balancer, which is hardware or " -"software. We recommend a hardware load balancer as it generally has good " -"performance." - -msgid "Alternatively:" -msgstr "Alternatively:" - -msgid "" -"An AMQP (Advanced Message Queuing Protocol) compliant message bus is " -"required for most OpenStack components in order to coordinate the execution " -"of jobs entered into the system." -msgstr "" -"An AMQP (Advanced Message Queuing Protocol) compliant message bus is " -"required for most OpenStack components in order to coordinate the execution " -"of jobs entered into the system." - -msgid "An OpenStack environment includes multiple data pools for the VMs:" -msgstr "An OpenStack environment includes multiple data pools for the VMs:" - -msgid "" -"And the quorum could also have been set to three, just as a configuration " -"example." -msgstr "" -"And the quorum could also have been set to three, just as a configuration " -"example." - -msgid "AppArmor" -msgstr "AppArmor" - -msgid "AppArmor now permits Galera Cluster to operate." -msgstr "AppArmor now permits Galera Cluster to operate." - -msgid "Appendix" -msgstr "Appendix" - -msgid "" -"Application Armor is a kernel module for improving security on Linux " -"operating systems. It is developed by Canonical and commonly used on Ubuntu-" -"based distributions. In the context of Galera Cluster, systems with AppArmor " -"may block the database service from operating normally." -msgstr "" -"Application Armour is a kernel module for improving security on Linux " -"operating systems. It is developed by Canonical and commonly used on Ubuntu-" -"based distributions. In the context of Galera Cluster, systems with AppArmor " -"may block the database service from operating normally." - -msgid "Applications and automatic service migration" -msgstr "Applications and automatic service migration" - -msgid "" -"As another option to make RabbitMQ highly available, RabbitMQ contains the " -"OCF scripts for the Pacemaker cluster resource agents since version 3.5.7. " -"It provides the active/active RabbitMQ cluster with mirrored queues. For " -"more information, see `Auto-configuration of a cluster with a Pacemaker " -"`_." 
-msgstr "" -"As another option to make RabbitMQ highly available, RabbitMQ contains the " -"OCF scripts for the Pacemaker cluster resource agents since version 3.5.7. " -"It provides the active/active RabbitMQ cluster with mirrored queues. For " -"more information, see `Auto-configuration of a cluster with a Pacemaker " -"`_." - -msgid "" -"As of September 2016, the OpenStack High Availability community is designing " -"and developing an official and unified way to provide high availability for " -"instances. We are developing automatic recovery from failures of hardware or " -"hypervisor-related software on the compute node, or other failures that " -"could prevent instances from functioning correctly, such as, issues with a " -"cinder volume I/O path." -msgstr "" -"As of September 2016, the OpenStack High Availability community is designing " -"and developing an official and unified way to provide high availability for " -"instances. We are developing automatic recovery from failures of hardware or " -"hypervisor-related software on the compute node, or other failures that " -"could prevent instances from functioning correctly, such as, issues with a " -"cinder volume I/O path." - -msgid "" -"At its core, a cluster is a distributed finite state machine capable of co-" -"ordinating the startup and recovery of inter-related services across a set " -"of machines." -msgstr "" -"At its core, a cluster is a distributed finite state machine capable of co-" -"ordinating the start-up and recovery of inter-related services across a set " -"of machines." - -msgid "Automated recovery of failed instances" -msgstr "Automated recovery of failed instances" - -msgid "Awareness of instances on other machines" -msgstr "Awareness of instances on other machines" - -msgid "Awareness of other applications in the stack" -msgstr "Awareness of other applications in the stack" - -msgid "" -"Bear in mind, leaving SELinux in permissive mode is not a good security " -"practice. Over the longer term, you need to develop a security policy for " -"Galera Cluster and then switch SELinux back into enforcing mode." -msgstr "" -"Bear in mind, leaving SELinux in permissive mode is not a good security " -"practice. Over the longer term, you need to develop a security policy for " -"Galera Cluster and then switch SELinux back into enforcing mode." - -msgid "" -"Before beginning, ensure that you are familiar with the documentation for " -"installing the OpenStack Image API service. See the *Image service* section " -"in the `Installation Guides `_, " -"depending on your distribution." -msgstr "" -"Before beginning, ensure that you are familiar with the documentation for " -"installing the OpenStack Image API service. See the *Image service* section " -"in the `Installation Guides `_, " -"depending on your distribution." - -msgid "" -"Before beginning, ensure you have read the `OpenStack Identity service " -"getting started documentation `_." -msgstr "" -"Before beginning, ensure you have read the `OpenStack Identity service " -"getting started documentation `_." - -msgid "" -"Before following this guide to configure the highly available OpenStack " -"cluster, ensure the IP ``10.0.0.11`` and hostname ``controller`` are not in " -"use." -msgstr "" -"Before following this guide to configure the highly available OpenStack " -"cluster, ensure the IP ``10.0.0.11`` and hostname ``controller`` are not in " -"use." 
- -msgid "" -"Before you launch Galera Cluster, you need to configure the server and the " -"database to operate as part of the cluster." -msgstr "" -"Before you launch Galera Cluster, you need to configure the server and the " -"database to operate as part of the cluster." - -msgid "" -"Both the central and the compute agent can run in an HA deployment. This " -"means that multiple instances of these services can run in parallel with " -"workload partitioning among these running instances." -msgstr "" -"Both the central and the compute agent can run in an HA deployment. This " -"means that multiple instances of these services can run in parallel with " -"workload partitioning among these running instances." - -msgid "" -"Both use a cluster manager, such as Pacemaker or Veritas, to orchestrate the " -"actions of the various services across a set of machines. Because we are " -"focused on FOSS, we refer to these as Pacemaker architectures." -msgstr "" -"Both use a cluster manager, such as Pacemaker or Veritas, to orchestrate the " -"actions of the various services across a set of machines. Because we are " -"focused on FOSS, we refer to these as Pacemaker architectures." - -msgid "" -"By default, STONITH is enabled in Pacemaker, but STONITH mechanisms (to " -"shutdown a node via IPMI or ssh) are not configured. In this case Pacemaker " -"will refuse to start any resources. For production cluster it is recommended " -"to configure appropriate STONITH mechanisms. But for demo or testing " -"purposes STONITH can be disabled completely as follows:" -msgstr "" -"By default, STONITH is enabled in Pacemaker, but STONITH mechanisms (to " -"shutdown a node via IPMI or ssh) are not configured. In this case Pacemaker " -"will refuse to start any resources. For production cluster it is recommended " -"to configure appropriate STONITH mechanisms. But for demo or testing " -"purposes STONITH can be disabled completely as follows:" - -msgid "" -"By default, ``controller1`` handles the caching service. If the host goes " -"down, ``controller2`` or ``controller3`` will complete the service." -msgstr "" -"By default, ``controller1`` handles the caching service. If the host goes " -"down, ``controller2`` or ``controller3`` will complete the service." - -msgid "" -"By default, cluster nodes do not start as part of a Primary Component. In " -"the Primary Component, replication and state transfers bring all databases " -"to the same state." -msgstr "" -"By default, cluster nodes do not start as part of a Primary Component. In " -"the Primary Component, replication and state transfers bring all databases " -"to the same state." - -msgid "" -"By sending all API access through the proxy, you can clearly identify " -"service interdependencies. You can also move them to locations other than " -"``localhost`` to increase capacity if the need arises." -msgstr "" -"By sending all API access through the proxy, you can clearly identify " -"service interdependencies. You can also move them to locations other than " -"``localhost`` to increase capacity if the need arises." - -msgid "Ceph" -msgstr "Ceph" - -msgid "" -"Ceph RBD provides object replication capabilities by storing Block Storage " -"volumes as Ceph RBD objects. Ceph RBD ensures that each replica of an object " -"is stored on a different node. This means that your volumes are protected " -"against hard drive and node failures, or even the failure of the data center " -"itself." 
-msgstr "" -"Ceph RBD provides object replication capabilities by storing Block Storage " -"volumes as Ceph RBD objects. Ceph RBD ensures that each replica of an object " -"is stored on a different node. This means that your volumes are protected " -"against hard drive and node failures, or even the failure of the data centre " -"itself." - -msgid "" -"Certain services running on the underlying operating system of your " -"OpenStack database may block Galera Cluster from normal operation or prevent " -"``mysqld`` from achieving network connectivity with the cluster." -msgstr "" -"Certain services running on the underlying operating system of your " -"OpenStack database may block Galera Cluster from normal operation or prevent " -"``mysqld`` from achieving network connectivity with the cluster." - -msgid "Change the number of expected votes for a cluster to be quorate" -msgstr "Change the number of expected votes for a cluster to be quorate" - -msgid "Change the number of votes assigned to a node" -msgstr "Change the number of votes assigned to a node" - -msgid "" -"Cinder provides Block-Storage-as-a-Service suitable for performance " -"sensitive scenarios such as databases, expandable file systems, or providing " -"a server with access to raw block level storage." -msgstr "" -"Cinder provides Block-Storage-as-a-Service suitable for performance " -"sensitive scenarios such as databases, expandable file systems, or providing " -"a server with access to raw block level storage." - -msgid "Clusters and quorums" -msgstr "Clusters and quorums" - -msgid "" -"Clusters with an even number of hosts suffer from similar issues. A single " -"network failure could easily cause a N:N split where neither side retains a " -"majority. For this reason, we recommend an odd number of cluster members " -"when scaling up." -msgstr "" -"Clusters with an even number of hosts suffer from similar issues. A single " -"network failure could easily cause a N:N split where neither side retains a " -"majority. For this reason, we recommend an odd number of cluster members " -"when scaling up." 
- -msgid "Collapsed" -msgstr "Collapsed" - -msgid "" -"Commit your configuration changes by entering the following command from " -"the :command:`crm configure` menu:" -msgstr "" -"Commit your configuration changes by entering the following command from " -"the :command:`crm configure` menu:" - -msgid "" -"Commit your configuration changes from the :command:`crm configure` menu " -"with the following command:" -msgstr "" -"Commit your configuration changes from the :command:`crm configure` menu " -"with the following command:" - -msgid "Common deployment architectures" -msgstr "Common deployment architectures" - -msgid "Configuration" -msgstr "Configuration" - -msgid "Configuration tips" -msgstr "Configuration tips" - -msgid "Configure Block Storage API service" -msgstr "Configure Block Storage API service" - -msgid "Configure NTP" -msgstr "Configure NTP" - -msgid "Configure OpenStack Identity service" -msgstr "Configure OpenStack Identity service" - -msgid "Configure OpenStack Image service API" -msgstr "Configure OpenStack Image service API" - -msgid "Configure OpenStack services to use HA Shared File Systems API" -msgstr "Configure OpenStack services to use HA Shared File Systems API" - -msgid "Configure OpenStack services to use Rabbit HA queues" -msgstr "Configure OpenStack services to use Rabbit HA queues" - -msgid "" -"Configure OpenStack services to use the highly available Block Storage API" -msgstr "" -"Configure OpenStack services to use the highly available Block Storage API" - -msgid "" -"Configure OpenStack services to use the highly available OpenStack Identity" -msgstr "" -"Configure OpenStack services to use the highly available OpenStack Identity" - -msgid "" -"Configure OpenStack services to use the highly available OpenStack Image API" -msgstr "" -"Configure OpenStack services to use the highly available OpenStack Image API" - -msgid "Configure RabbitMQ for HA queues" -msgstr "Configure RabbitMQ for HA queues" - -msgid "Configure Shared File Systems API service" -msgstr "Configure Shared File Systems API service" - -msgid "" -"Configure networking on each node. See the basic information about " -"configuring networking in the *Networking service* section of the `Install " -"Guides `_, depending on your " -"distribution." -msgstr "" -"Configure networking on each node. See the basic information about " -"configuring networking in the *Networking service* section of the `Install " -"Guides `_, depending on your " -"distribution." - -msgid "Configure the OpenStack components to use at least two RabbitMQ nodes." -msgstr "Configure the OpenStack components to use at least two RabbitMQ nodes." - -msgid "Configure the VIP" -msgstr "Configure the VIP" - -msgid "" -"Configure the kernel parameter to allow non-local IP binding. This allows " -"running HAProxy instances to bind to a VIP for failover. Add following line " -"to ``/etc/sysctl.conf``:" -msgstr "" -"Configure the kernel parameter to allow non-local IP binding. This allows " -"running HAProxy instances to bind to a VIP for failover. 
Add following line " -"to ``/etc/sysctl.conf``:" - -msgid "Configuring Block Storage to listen on the VIP address" -msgstr "Configuring Block Storage to listen on the VIP address" - -msgid "Configuring HAProxy" -msgstr "Configuring HAProxy" - -msgid "Configuring InnoDB" -msgstr "Configuring InnoDB" - -msgid "Configuring OpenStack services to use this IP address" -msgstr "Configuring OpenStack services to use this IP address" - -msgid "" -"Configuring RAID on the hard drives that implement storage protects your " -"data against a hard drive failure. If the node itself fails, data may be " -"lost. In particular, all volumes stored on an LVM node can be lost." -msgstr "" -"Configuring RAID on the hard drives that implement storage protects your " -"data against a hard drive failure. If the node itself fails, data may be " -"lost. In particular, all volumes stored on an LVM node can be lost." - -msgid "Configuring high availability for instances" -msgstr "Configuring high availability for instances" - -msgid "Configuring mysqld" -msgstr "Configuring mysqld" - -msgid "Configuring storage" -msgstr "Configuring storage" - -msgid "Configuring the basic environment" -msgstr "Configuring the basic environment" - -msgid "Configuring the compute node" -msgstr "Configuring the compute node" - -msgid "Configuring the controller" -msgstr "Configuring the controller" - -msgid "Configuring the networking services" -msgstr "Configuring the networking services" - -msgid "Configuring the server" -msgstr "Configuring the server" - -msgid "Configuring the shared services" -msgstr "Configuring the shared services" - -msgid "Configuring wsrep replication" -msgstr "Configuring wsrep replication" - -msgid "" -"Connect an additional quorum device to allow small clusters remain quorate " -"during node outages" -msgstr "" -"Connect an additional quorum device to allow small clusters remain quorate " -"during node outages" - -msgid "" -"Consider that, while exchanges and bindings survive the loss of individual " -"nodes, queues and their messages do not because a queue and its contents are " -"located on one node. If we lose this node, we also lose the queue." -msgstr "" -"Consider that, while exchanges and bindings survive the loss of individual " -"nodes, queues and their messages do not because a queue and its contents are " -"located on one node. If we lose this node, we also lose the queue." - -msgid "Contents" -msgstr "Contents" - -msgid "" -"Corosync can be configured to work with either multicast or unicast IP " -"addresses or to use the votequorum library." -msgstr "" -"Corosync can be configured to work with either multicast or unicast IP " -"addresses or to use the votequorum library." - -msgid "" -"Corosync is started as a regular system service. Depending on your " -"distribution, it may ship with an LSB init script, an upstart job, or a " -"Systemd unit file." -msgstr "" -"Corosync is started as a regular system service. Depending on your " -"distribution, it may ship with an LSB init script, an upstart job, or a " -"Systemd unit file." 
- -msgid "" -"Create a configuration file for ``clustercheck`` at ``/etc/sysconfig/" -"clustercheck``:" -msgstr "" -"Create a configuration file for ``clustercheck`` at ``/etc/sysconfig/" -"clustercheck``:" - -msgid "" -"Create a configuration file for the HAProxy monitor service, at ``/etc/" -"xinetd.d/galera-monitor``:" -msgstr "" -"Create a configuration file for the HAProxy monitor service, at ``/etc/" -"xinetd.d/galera-monitor``:" - -msgid "" -"Create a symbolic link for the database server in the ``disable`` directory:" -msgstr "" -"Create a symbolic link for the database server in the ``disable`` directory:" - -msgid "" -"Create and name the cluster. Then, start it and enable all components to " -"auto-start at boot time:" -msgstr "" -"Create and name the cluster. Then, start it and enable all components to " -"auto-start at boot time:" - -msgid "Create the Block Storage API endpoint with this IP." -msgstr "Create the Block Storage API endpoint with this IP." - -msgid "Create the OpenStack Identity Endpoint with this IP address." -msgstr "Create the OpenStack Identity Endpoint with this IP address." - -msgid "Current upstream work" -msgstr "Current upstream work" - -msgid "" -"Data integrity through fencing (a non-responsive process does not imply it " -"is not doing anything)" -msgstr "" -"Data integrity through fencing (a non-responsive process does not imply it " -"is not doing anything)" - -msgid "Data loss: Accidental deletion or destruction of data." -msgstr "Data loss: Accidental deletion or destruction of data." - -msgid "Database (Galera Cluster) for high availability" -msgstr "Database (Galera Cluster) for high availability" - -msgid "Database configuration" -msgstr "Database configuration" - -msgid "Database hosts with Galera Cluster installed" -msgstr "Database hosts with Galera Cluster installed" - -msgid "" -"Define the InnoDB memory buffer pool size. The default value is 128 MB, but " -"to compensate for Galera Cluster's additional memory usage, scale your usual " -"value back by 5%:" -msgstr "" -"Define the InnoDB memory buffer pool size. The default value is 128 MB, but " -"to compensate for Galera Cluster's additional memory usage, scale your usual " -"value back by 5%:" - -msgid "Deployment flavors" -msgstr "Deployment flavours" - -msgid "Deployment strategies" -msgstr "Deployment strategies" - -msgid "Description" -msgstr "Description" - -msgid "" -"Do not change this value. Other modes may cause ``INSERT`` statements on " -"tables with auto-increment columns to fail as well as unresolved deadlocks " -"that leave the system unresponsive." -msgstr "" -"Do not change this value. Other modes may cause ``INSERT`` statements on " -"tables with auto-increment columns to fail as well as unresolved deadlocks " -"that leave the system unresponsive." - -msgid "Download the resource agent to your system:" -msgstr "Download the resource agent to your system:" - -msgid "" -"Each configured interface must have a unique ``ringnumber``, starting with 0." -msgstr "" -"Each configured interface must have a unique ``ringnumber``, starting with 0." - -msgid "Each instance has its own IP address:" -msgstr "Each instance has its own IP address:" - -msgid "" -"Each instance of HAProxy configures its front end to accept connections only " -"to the virtual IP (VIP) address. The HAProxy back end (termination point) is " -"a list of all the IP addresses of instances for load balancing." 
-msgstr "" -"Each instance of HAProxy configures its front end to accept connections only " -"to the virtual IP (VIP) address. The HAProxy back end (termination point) is " -"a list of all the IP addresses of instances for load balancing." - -msgid "" -"Each service also has a backup but manages both the main and redundant " -"systems concurrently. This way, if there is a failure, the user is unlikely " -"to notice. The backup system is already online and takes on increased load " -"while the main system is fixed and brought back online." -msgstr "" -"Each service also has a backup but manages both the main and redundant " -"systems concurrently. This way, if there is a failure, the user is unlikely " -"to notice. The backup system is already online and takes on increased load " -"while the main system is fixed and brought back online." - -msgid "" -"Edit the :file:`/etc/glance/glance-api.conf` file to configure the OpenStack " -"Image service:" -msgstr "" -"Edit the :file:`/etc/glance/glance-api.conf` file to configure the OpenStack " -"Image service:" - -msgid "Edit the :file:`/etc/manila/manila.conf` file:" -msgstr "Edit the :file:`/etc/manila/manila.conf` file:" - -msgid "" -"Edit the :file:`keystone.conf` file to change the values of the :manpage:" -"`bind(2)` parameters:" -msgstr "" -"Edit the :file:`keystone.conf` file to change the values of the :manpage:" -"`bind(2)` parameters:" - -msgid "" -"Edit the ``/etc/cinder/cinder.conf`` file. For example, on a RHEL-based " -"system:" -msgstr "" -"Edit the ``/etc/cinder/cinder.conf`` file. For example, on a RHEL-based " -"system:" - -msgid "Enhanced failure detection" -msgstr "Enhanced failure detection" - -msgid "" -"Ensure that the InnoDB locking mode for generating auto-increment values is " -"set to ``2``, which is the interleaved locking mode:" -msgstr "" -"Ensure that the InnoDB locking mode for generating auto-increment values is " -"set to ``2``, which is the interleaved locking mode:" - -msgid "" -"Ensure that the InnoDB log buffer is written to file once per second, rather " -"than on each commit, to improve performance:" -msgstr "" -"Ensure that the InnoDB log buffer is written to file once per second, rather " -"than on each commit, to improve performance:" - -msgid "" -"Ensure that the binary log format is set to use row-level replication, as " -"opposed to statement-level replication:" -msgstr "" -"Ensure that the binary log format is set to use row-level replication, as " -"opposed to statement-level replication:" - -msgid "" -"Ensure that the database server is not bound only to the localhost: " -"``127.0.0.1``. Also, do not bind it to ``0.0.0.0``. Binding to the localhost " -"or ``0.0.0.0`` makes ``mySQL`` bind to all IP addresses on the machine, " -"including the virtual IP address causing ``HAProxy`` not to start. Instead, " -"bind to the management IP address of the controller node to enable access by " -"other nodes through the management network:" -msgstr "" -"Ensure that the database server is not bound only to the localhost: " -"``127.0.0.1``. Also, do not bind it to ``0.0.0.0``. Binding to the localhost " -"or ``0.0.0.0`` makes ``mySQL`` bind to all IP addresses on the machine, " -"including the virtual IP address causing ``HAProxy`` not to start. 
Instead, " -"bind to the management IP address of the controller node to enable access by " -"other nodes through the management network:" - -msgid "Ensure that the default storage engine is set to InnoDB:" -msgstr "Ensure that the default storage engine is set to InnoDB:" - -msgid "" -"Ensure your HAProxy installation is not a single point of failure, it is " -"advisable to have multiple HAProxy instances running." -msgstr "" -"Ensure your HAProxy installation is not a single point of failure, it is " -"advisable to have multiple HAProxy instances running." - -msgid "" -"Ephemeral storage is allocated for an instance and is deleted when the " -"instance is deleted. The Compute service manages ephemeral storage and by " -"default, Compute stores ephemeral drives as files on local disks on the " -"compute node. As an alternative, you can use Ceph RBD as the storage back " -"end for ephemeral storage." -msgstr "" -"Ephemeral storage is allocated for an instance and is deleted when the " -"instance is deleted. The Compute service manages ephemeral storage and by " -"default, Compute stores ephemeral drives as files on local disks on the " -"compute node. As an alternative, you can use Ceph RBD as the storage back " -"end for ephemeral storage." - -msgid "" -"Even a distributed or replicated application that is able to survive " -"failures on one or more machines can benefit from a cluster manager because " -"a cluster manager has the following capabilities:" -msgstr "" -"Even a distributed or replicated application that is able to survive " -"failures on one or more machines can benefit from a cluster manager because " -"a cluster manager has the following capabilities:" - -msgid "Existing solutions" -msgstr "Existing solutions" - -msgid "Facility services such as power, air conditioning, and fire protection" -msgstr "Facility services such as power, air conditioning, and fire protection" - -msgid "Firewall" -msgstr "Firewall" - -msgid "" -"For Liberty, you can not have the standalone network nodes. The Networking " -"services are run on the controller nodes. In this guide, the term `network " -"nodes` is used for convenience." -msgstr "" -"For Liberty, you can not have the standalone network nodes. The Networking " -"services are run on the controller nodes. In this guide, the term `network " -"nodes` is used for convenience." - -msgid "" -"For OpenStack Compute, (if your OpenStack Identity service IP address is " -"10.0.0.11) use the following configuration in the :file:`api-paste.ini` file:" -msgstr "" -"For OpenStack Compute, (if your OpenStack Identity service IP address is " -"10.0.0.11) use the following configuration in the :file:`api-paste.ini` file:" - -msgid "For RHEL, Fedora, or CentOS:" -msgstr "For RHEL, Fedora, or CentOS:" - -msgid "" -"For Red Hat Enterprise Linux and Red Hat-based Linux distributions, the " -"following process uses Systemd unit files." -msgstr "" -"For Red Hat Enterprise Linux and Red Hat-based Linux distributions, the " -"following process uses Systemd unit files." - -msgid "" -"For SLES 12, the packages are signed by GPG key 893A90DAD85F9316. You should " -"verify the fingerprint of the imported GPG key before using it." -msgstr "" -"For SLES 12, the packages are signed by GPG key 893A90DAD85F9316. You should " -"verify the fingerprint of the imported GPG key before using it." - -msgid "For SLES 12:" -msgstr "For SLES 12:" - -msgid "" -"For UDPU, every node that should be a member of the membership must be " -"specified." 
-msgstr "" -"For UDPU, every node that should be a member of the membership must be " -"specified." - -msgid "" -"For Ubuntu 16.04.1: Create a configuration file for ``clustercheck`` at ``/" -"etc/default/clustercheck``." -msgstr "" -"For Ubuntu 16.04.1: Create a configuration file for ``clustercheck`` at ``/" -"etc/default/clustercheck``." - -msgid "For Ubuntu or Debian:" -msgstr "For Ubuntu or Debian:" - -msgid "" -"For Ubuntu, you should also enable the Corosync service in the ``/etc/" -"default/corosync`` configuration file." -msgstr "" -"For Ubuntu, you should also enable the Corosync service in the ``/etc/" -"default/corosync`` configuration file." - -msgid "" -"For `Fedora `_" -msgstr "" -"For `Fedora `_" - -msgid "" -"For `Ubuntu `_" -msgstr "" -"For `Ubuntu `_" - -msgid "For ``crmsh``:" -msgstr "For ``crmsh``:" - -msgid "For ``pcs``:" -msgstr "For ``pcs``:" - -msgid "" -"For a complete list of the available parameters, run the ``SHOW VARIABLES`` " -"command from within the database client:" -msgstr "" -"For a complete list of the available parameters, run the ``SHOW VARIABLES`` " -"command from within the database client:" - -msgid "" -"For backward compatibility and supporting existing deployments, the central " -"agent configuration supports using different configuration files. This is " -"for groups of service instances that are running in parallel. For enabling " -"this configuration, set a value for the ``partitioning_group_prefix`` option " -"in the `polling section `_ in the OpenStack Configuration " -"Reference." -msgstr "" -"For backward compatibility and supporting existing deployments, the central " -"agent configuration supports using different configuration files. This is " -"for groups of service instances that are running in parallel. For enabling " -"this configuration, set a value for the ``partitioning_group_prefix`` option " -"in the `polling section `_ in the OpenStack Configuration " -"Reference." - -msgid "" -"For demonstrations and studying, you can set up a test environment on " -"virtual machines (VMs). This has the following benefits:" -msgstr "" -"For demonstrations and studying, you can set up a test environment on " -"virtual machines (VMs). This has the following benefits:" - -msgid "" -"For detailed instructions about installing HAProxy on your nodes, see the " -"HAProxy `official documentation `_." -msgstr "" -"For detailed instructions about installing HAProxy on your nodes, see the " -"HAProxy `official documentation `_." - -msgid "" -"For documentation about these parameters, ``wsrep`` provider option, and " -"status variables available in Galera Cluster, see the Galera cluster " -"`Reference `_." -msgstr "" -"For documentation about these parameters, ``wsrep`` provider option, and " -"status variables available in Galera Cluster, see the Galera cluster " -"`Reference `_." - -msgid "" -"For each sub-group of the central agent pool with the same " -"``partitioning_group_prefix``, a disjoint subset of meters must be polled to " -"avoid samples being missing or duplicated. The list of meters to poll can be " -"set in the :file:`/etc/ceilometer/pipeline.yaml` configuration file. For " -"more information about pipelines see the `Data processing and pipelines " -"`_ " -"section." -msgstr "" -"For each sub-group of the central agent pool with the same " -"``partitioning_group_prefix``, a disjoint subset of meters must be polled to " -"avoid samples being missing or duplicated. 
The list of meters to poll can be " -"set in the :file:`/etc/ceilometer/pipeline.yaml` configuration file. For " -"more information about pipelines see the `Data processing and pipelines " -"`_ " -"section." - -msgid "" -"For environments that do not support multicast, Corosync should be " -"configured for unicast. An example fragment of the :file:`corosync.conf` " -"file for unicastis is shown below:" -msgstr "" -"For environments that do not support multicast, Corosync should be " -"configured for unicast. An example fragment of the :file:`corosync.conf` " -"file for unicast is shown below:" - -msgid "" -"For example, if your OpenStack Image API service IP address is 10.0.0.11 (as " -"in the configuration explained here), you would use the following " -"configuration in your :file:`nova.conf` file:" -msgstr "" -"For example, if your OpenStack Image API service IP address is 10.0.0.11 (as " -"in the configuration explained here), you would use the following " -"configuration in your :file:`nova.conf` file:" - -msgid "" -"For example, in a seven-node cluster, the quorum should be set to " -"``floor(7/2) + 1 == 4``. If quorum is four and four nodes fail " -"simultaneously, the cluster itself would fail, whereas it would continue to " -"function, if no more than three nodes fail. If split to partitions of three " -"and four nodes respectively, the quorum of four nodes would continue to " -"operate the majority partition and stop or fence the minority one (depending " -"on the no-quorum-policy cluster configuration)." -msgstr "" -"For example, in a seven-node cluster, the quorum should be set to " -"``floor(7/2) + 1 == 4``. If quorum is four and four nodes fail " -"simultaneously, the cluster itself would fail, whereas it would continue to " -"function, if no more than three nodes fail. If split to partitions of three " -"and four nodes respectively, the quorum of four nodes would continue to " -"operate the majority partition and stop or fence the minority one (depending " -"on the no-quorum-policy cluster configuration)." - -msgid "" -"For example, you may enter ``edit p_ip_glance-api`` from the :command:`crm " -"configure` menu and edit the resource to match your preferred virtual IP " -"address." -msgstr "" -"For example, you may enter ``edit p_ip_glance-api`` from the :command:`crm " -"configure` menu and edit the resource to match your preferred virtual IP " -"address." - -msgid "" -"For example, you may enter ``edit p_ip_keystone`` from the :command:`crm " -"configure` menu and edit the resource to match your preferred virtual IP " -"address." -msgstr "" -"For example, you may enter ``edit p_ip_keystone`` from the :command:`crm " -"configure` menu and edit the resource to match your preferred virtual IP " -"address." - -msgid "" -"For example, you may enter ``edit p_ip_manila-api`` from the :command:`crm " -"configure` menu and edit the resource to match your preferred virtual IP " -"address." -msgstr "" -"For example, you may enter ``edit p_ip_manila-api`` from the :command:`crm " -"configure` menu and edit the resource to match your preferred virtual IP " -"address." - -msgid "" -"For firewall configurations, Corosync communicates over UDP only, and uses " -"``mcastport`` (for receives) and ``mcastport - 1`` (for sends)." -msgstr "" -"For firewall configurations, Corosync communicates over UDP only, and uses " -"``mcastport`` (for receives) and ``mcastport - 1`` (for sends)." 
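To make the firewall note above concrete: because Corosync uses UDP on ``mcastport`` and ``mcastport - 1``, a host firewall only needs those two ports opened on the cluster network. A minimal iptables sketch, assuming the common default ``mcastport`` of 5405 and a placeholder cluster subnet of 10.0.0.0/24 (neither value is taken from this guide):

.. code-block:: console

   # allow Corosync cluster traffic (mcastport and mcastport - 1)
   # iptables -A INPUT -s 10.0.0.0/24 -p udp -m multiport --dports 5404,5405 -j ACCEPT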
- -msgid "" -"For information about the required configuration options to set in the :file:" -"`ceilometer.conf`, see the `coordination section `_ in the OpenStack Configuration " -"Reference." -msgstr "" -"For information about the required configuration options to set in the :file:" -"`ceilometer.conf`, see the `coordination section `_ in the OpenStack Configuration " -"Reference." - -msgid "" -"For more information about Memcached installation, see the *Environment -> " -"Memcached* section in the `Installation Guides `_ depending on your distribution." -msgstr "" -"For more information about Memcached installation, see the *Environment -> " -"Memcached* section in the `Installation Guides `_ depending on your distribution." - -msgid "" -"For more information about configuring storage back ends for the different " -"storage options, see `Manage volumes `_ in the OpenStack Administrator Guide." -msgstr "" -"For more information about configuring storage back ends for the different " -"storage options, see `Manage volumes `_ in the OpenStack Administrator Guide." - -msgid "" -"For more information on configuring SELinux to work with Galera Cluster, see " -"the `SELinux Documentation `_" -msgstr "" -"For more information on configuring SELinux to work with Galera Cluster, see " -"the `SELinux Documentation `_" - -msgid "" -"For more information on firewalls, see `firewalls and default ports `_ in OpenStack " -"Administrator Guide." -msgstr "" -"For more information on firewalls, see `firewalls and default ports `_ in OpenStack " -"Administrator Guide." - -msgid "" -"For more information, see the official installation manual for the " -"distribution:" -msgstr "" -"For more information, see the official installation manual for the " -"distribution:" - -msgid "For openSUSE:" -msgstr "For openSUSE:" - -msgid "For servers that use ``systemd``, run the following command:" -msgstr "For servers that use ``systemd``, run the following command:" - -msgid "For servers that use ``systemd``, run the following commands:" -msgstr "For servers that use ``systemd``, run the following commands:" - -msgid "" -"For these reasons, we highly recommend the use of a cluster manager like " -"`Pacemaker `_." -msgstr "" -"For these reasons, we highly recommend the use of a cluster manager like " -"`Pacemaker `_." - -msgid "" -"For this reason, each cluster in a high availability environment should have " -"an odd number of nodes and the quorum is defined as more than a half of the " -"nodes. If multiple nodes fail so that the cluster size falls below the " -"quorum value, the cluster itself fails." -msgstr "" -"For this reason, each cluster in a High Availability environment should have " -"an odd number of nodes and the quorum is defined as more than a half of the " -"nodes. If multiple nodes fail so that the cluster size falls below the " -"quorum value, the cluster itself fails." - -msgid "" -"Galera Cluster configuration parameters all have the ``wsrep_`` prefix. You " -"must define the following parameters for each cluster node in your OpenStack " -"database." -msgstr "" -"Galera Cluster configuration parameters all have the ``wsrep_`` prefix. You " -"must define the following parameters for each cluster node in your OpenStack " -"database." - -msgid "" -"Galera Cluster does not support non-transactional storage engines and " -"requires that you use InnoDB by default. There are some additional " -"parameters that you must define to avoid conflicts." 
-msgstr "" -"Galera Cluster does not support non-transactional storage engines and " -"requires that you use InnoDB by default. There are some additional " -"parameters that you must define to avoid conflicts." - -msgid "" -"Galera Cluster requires that you open the following ports to network traffic:" -msgstr "" -"Galera Cluster requires that you open the following ports to network traffic:" - -msgid "Galera can be configured using one of the following strategies:" -msgstr "Galera can be configured using one of the following strategies:" - -msgid "Galera runs behind HAProxy:" -msgstr "Galera runs behind HAProxy:" - -msgid "" -"Galera synchronous replication guarantees a zero slave lag. The failover " -"procedure completes once HAProxy detects that the active back end has gone " -"down and switches to the backup one, which is then marked as ``UP``. If no " -"back ends are ``UP``, the failover procedure finishes only when the Galera " -"Cluster has been successfully reassembled. The SLA is normally no more than " -"5 minutes." -msgstr "" -"Galera synchronous replication guarantees a zero slave lag. The failover " -"procedure completes once HAProxy detects that the active back end has gone " -"down and switches to the backup one, which is then marked as ``UP``. If no " -"back ends are ``UP``, the failover procedure finishes only when the Galera " -"Cluster has been successfully reassembled. The SLA is normally no more than " -"5 minutes." - -msgid "" -"Generally, we use round-robin to distribute load amongst instances of active/" -"active services. Alternatively, Galera uses ``stick-table`` options to " -"ensure that incoming connection to virtual IP (VIP) are directed to only one " -"of the available back ends. This helps avoid lock contention and prevent " -"deadlocks, although Galera can run active/active. Used in combination with " -"the ``httpchk`` option, this ensure only nodes that are in sync with their " -"peers are allowed to handle requests." -msgstr "" -"Generally, we use round-robin to distribute load amongst instances of active/" -"active services. Alternatively, Galera uses ``stick-table`` options to " -"ensure that incoming connection to virtual IP (VIP) are directed to only one " -"of the available back ends. This helps avoid lock contention and prevent " -"deadlocks, although Galera can run active/active. Used in combination with " -"the ``httpchk`` option, this ensure only nodes that are in sync with their " -"peers are allowed to handle requests." - -msgid "HAProxy" -msgstr "HAProxy" - -msgid "" -"HAProxy load balances incoming requests and exposes just one IP address for " -"all the clients." -msgstr "" -"HAProxy load balances incoming requests and exposes just one IP address for " -"all the clients." - -msgid "" -"HAProxy provides a fast and reliable HTTP reverse proxy and load balancer " -"for TCP or HTTP applications. It is particularly suited for web crawling " -"under very high loads while needing persistence or Layer 7 processing. It " -"realistically supports tens of thousands of connections with recent hardware." -msgstr "" -"HAProxy provides a fast and reliable HTTP reverse proxy and load balancer " -"for TCP or HTTP applications. It is particularly suited for web crawling " -"under very high loads while needing persistence or Layer 7 processing. It " -"realistically supports tens of thousands of connections with recent hardware." 
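As an illustration of the pattern described a few entries above, where ``httpchk`` keeps traffic away from out-of-sync Galera nodes and only one back end actively serves the VIP, a minimal HAProxy stanza might look like the following sketch. The VIP 10.0.0.11, the node addresses, and the health-check port 9200 are placeholders, not values taken from this guide:

.. code-block:: none

   listen galera_cluster
     bind 10.0.0.11:3306
     balance source
     option httpchk
     # send traffic to one in-sync node; the others stay as backups
     server controller1 10.0.0.12:3306 check port 9200 inter 2000 rise 2 fall 5
     server controller2 10.0.0.13:3306 backup check port 9200 inter 2000 rise 2 fall 5
     server controller3 10.0.0.14:3306 backup check port 9200 inter 2000 rise 2 fall 5

Marking the non-primary nodes as ``backup`` is one common way to avoid the lock contention mentioned above while still failing over automatically.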
- -msgid "Hardware considerations for high availability" -msgstr "Hardware considerations for high availability" - -msgid "Hardware setup" -msgstr "Hardware setup" - -msgid "" -"High availability is implemented with redundant hardware running redundant " -"instances of each service. If one piece of hardware running one instance of " -"a service fails, the system can then failover to use another instance of a " -"service that is running on hardware that did not fail." -msgstr "" -"High Availability is implemented with redundant hardware running redundant " -"instances of each service. If one piece of hardware running one instance of " -"a service fails, the system can then failover to use another instance of a " -"service that is running on hardware that did not fail." - -msgid "" -"High availability is not for every user. It presents some challenges. High " -"availability may be too complex for databases or systems with large amounts " -"of data. Replication can slow large systems down. Different setups have " -"different prerequisites. Read the guidelines for each setup." -msgstr "" -"High Availability is not for every user. It presents some challenges. High " -"availability may be too complex for databases or systems with large amounts " -"of data. Replication can slow large systems down. Different setups have " -"different prerequisites. Read the guidelines for each setup." - -msgid "High availability is turned off as the default in OpenStack setups." -msgstr "High Availability is turned off as the default in OpenStack setups." - -msgid "High availability systems seek to minimize the following issues:" -msgstr "High Availability systems seek to minimise the following issues:" - -msgid "" -"High availability systems typically achieve an uptime percentage of 99.99% " -"or more, which roughly equates to less than an hour of cumulative downtime " -"per year. In order to achieve this, high availability systems should keep " -"recovery times after a failure to about one to two minutes, sometimes " -"significantly less." -msgstr "" -"High Availability systems typically achieve an uptime percentage of 99.99% " -"or more, which roughly equates to less than an hour of cumulative downtime " -"per year. In order to achieve this, high availability systems should keep " -"recovery times after a failure to about one to two minutes, sometimes " -"significantly less." - -msgid "Highly available Block Storage API" -msgstr "Highly available Block Storage API" - -msgid "Highly available Identity API" -msgstr "Highly available Identity API" - -msgid "Highly available Image API" -msgstr "Highly available Image API" - -msgid "Highly available Shared File Systems API" -msgstr "Highly available Shared File Systems API" - -msgid "Highly available Telemetry" -msgstr "Highly available Telemetry" - -msgid "How long to back-off for between retries when connecting to RabbitMQ:" -msgstr "How long to back-off for between retries when connecting to RabbitMQ:" - -msgid "" -"However, running an OpenStack environment on VMs degrades the performance of " -"your instances, particularly if your hypervisor or processor lacks support " -"for hardware acceleration of nested VMs." -msgstr "" -"However, running an OpenStack environment on VMs degrades the performance of " -"your instances, particularly if your hypervisor or processor lacks support " -"for hardware acceleration of nested VMs." 
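The RabbitMQ retry and back-off knobs referenced above are normally set per service through the oslo.messaging RabbitMQ driver. A hedged sketch, using option names from older oslo.messaging releases (depending on the release they may live under ``[DEFAULT]`` instead, and the host list is a placeholder):

.. code-block:: ini

   [oslo_messaging_rabbit]
   rabbit_hosts = 10.0.0.12:5672,10.0.0.13:5672,10.0.0.14:5672
   # seconds to wait before the first reconnect attempt
   rabbit_retry_interval = 1
   # additional back-off added between successive retries
   rabbit_retry_backoff = 2
   # 0 means retry forever
   rabbit_max_retries = 0
   rabbit_ha_queues = true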
- -msgid "" -"If the Block Storage service runs on the same nodes as the other services, " -"then it is advisable to also include:" -msgstr "" -"If the Block Storage service runs on the same nodes as the other services, " -"then it is advisable to also include:" - -msgid "" -"If the Identity service will be sending ceilometer notifications and your " -"message bus is configured for high availability, you will need to ensure " -"that the Identity service is correctly configured to use it. For details on " -"how to configure the Identity service for this kind of deployment, see :doc:" -"`shared-messaging`." -msgstr "" -"If the Identity service will be sending Ceilometer notifications and your " -"message bus is configured for high availability, you will need to ensure " -"that the Identity service is correctly configured to use it. For details on " -"how to configure the Identity service for this kind of deployment, see :doc:" -"`shared-messaging`." - -msgid "" -"If the ``broadcast`` parameter is set to ``yes``, the broadcast address is " -"used for communication. If this option is set, the ``mcastaddr`` parameter " -"should not be set." -msgstr "" -"If the ``broadcast`` parameter is set to ``yes``, the broadcast address is " -"used for communication. If this option is set, the ``mcastaddr`` parameter " -"should not be set." - -msgid "" -"If the cluster is working, you can create usernames and passwords for the " -"queues." -msgstr "" -"If the cluster is working, you can create usernames and passwords for the " -"queues." - -msgid "" -"If you are using Corosync version 2 on Ubuntu 14.04, remove or comment out " -"lines under the service stanza. These stanzas enable Pacemaker to start up. " -"Another potential problem is the boot and shutdown order of Corosync and " -"Pacemaker. To force Pacemaker to start after Corosync and stop before " -"Corosync, fix the start and kill symlinks manually:" -msgstr "" -"If you are using Corosync version 2 on Ubuntu 14.04, remove or comment out " -"lines under the service stanza. These stanzas enable Pacemaker to start up. " -"Another potential problem is the boot and shutdown order of Corosync and " -"Pacemaker. To force Pacemaker to start after Corosync and stop before " -"Corosync, fix the start and kill symlinks manually:" - -msgid "" -"If you are using Corosync version 2, use the :command:`corosync-cmapctl` " -"utility instead of :command:`corosync-objctl`; it is a direct replacement." -msgstr "" -"If you are using Corosync version 2, use the :command:`corosync-cmapctl` " -"utility instead of :command:`corosync-objctl`; it is a direct replacement." - -msgid "" -"If you are using both private and public IP addresses, create two virtual IP " -"addresses and define the endpoint. For example:" -msgstr "" -"If you are using both private and public IP addresses, create two virtual IP " -"addresses and define the endpoint. For example:" - -msgid "" -"If you are using both private and public IP addresses, create two virtual " -"IPs and define your endpoint. For example:" -msgstr "" -"If you are using both private and public IP addresses, create two virtual " -"IPs and define your endpoint. 
For example:" - -msgid "" -"If you are using both private and public IP addresses, you should create two " -"virtual IPs and define your endpoints like this:" -msgstr "" -"If you are using both private and public IP addresses, you should create two " -"virtual IPs and define your endpoints like this:" - -msgid "" -"If you are using the Block Storage service OCF agent, some settings will be " -"filled in for you, resulting in a shorter configuration file:" -msgstr "" -"If you are using the Block Storage service OCF agent, some settings will be " -"filled in for you, resulting in a shorter configuration file:" - -msgid "" -"If you are using the horizon Dashboard, edit the :file:`local_settings.py` " -"file to include the following:" -msgstr "" -"If you are using the horizon Dashboard, edit the :file:`local_settings.py` " -"file to include the following:" - -msgid "" -"If you change the configuration from an old set-up that did not use HA " -"queues, restart the service:" -msgstr "" -"If you change the configuration from an old set-up that did not use HA " -"queues, restart the service:" - -msgid "" -"If you use HAProxy as a load-balancing client to provide access to the " -"Galera Cluster, as described in the :doc:`controller-ha-haproxy`, you can " -"use the ``clustercheck`` utility to improve health checks." -msgstr "" -"If you use HAProxy as a load-balancing client to provide access to the " -"Galera Cluster, as described in the :doc:`controller-ha-haproxy`, you can " -"use the ``clustercheck`` utility to improve health checks." - -msgid "" -"In Corosync, configurations use redundant networking (with more than one " -"interface). This means you must select a Redundant Ring Protocol (RRP) mode " -"other than none. We recommend ``active`` as the RRP mode." -msgstr "" -"In Corosync, configurations use redundant networking (with more than one " -"interface). This means you must select a Redundant Ring Protocol (RRP) mode " -"other than none. We recommend ``active`` as the RRP mode." - -msgid "" -"In Red Hat Enterprise Linux or CentOS environments, this is a recommended " -"path to perform configuration. For more information, see the `RHEL docs " -"`_." -msgstr "" -"In Red Hat Enterprise Linux or CentOS environments, this is a recommended " -"path to perform configuration. For more information, see the `RHEL docs " -"`_." - -msgid "" -"In a collapsed configuration, there is a single cluster of 3 or more nodes " -"on which every component is running." -msgstr "" -"In a collapsed configuration, there is a single cluster of 3 or more nodes " -"on which every component is running." - -msgid "" -"In addition to Galera Cluster, you can also achieve high availability " -"through other database options, such as PostgreSQL, which has its own " -"replication system." -msgstr "" -"In addition to Galera Cluster, you can also achieve High Availability " -"through other database options, such as PostgreSQL, which has its own " -"replication system." - -msgid "" -"In general, we can divide all the OpenStack components into three categories:" -msgstr "" -"In general, we can divide all the OpenStack components into three categories:" - -msgid "" -"In the Galera Cluster, the Primary Component is the cluster of database " -"servers that replicate into each other. In the event that a cluster node " -"loses connectivity with the Primary Component, it defaults into a non-" -"operational state, to avoid creating or serving inconsistent data." 
-msgstr "" -"In the Galera Cluster, the Primary Component is the cluster of database " -"servers that replicate into each other. In the event that a cluster node " -"loses connectivity with the Primary Component, it defaults into a non-" -"operational state, to avoid creating or serving inconsistent data." - -msgid "" -"In the event that a component fails and a back-up system must take on its " -"load, most high availability systems will replace the failed component as " -"quickly as possible to maintain necessary redundancy. This way time spent in " -"a degraded protection state is minimized." -msgstr "" -"In the event that a component fails and a back-up system must take on its " -"load, most High Availability systems will replace the failed component as " -"quickly as possible to maintain necessary redundancy. This way time spent in " -"a degraded protection state is minimized." - -msgid "" -"In the event that you need to restart any cluster node, you can do so. When " -"the database server comes back it, it establishes connectivity with the " -"Primary Component and updates itself to any changes it may have missed while " -"down." -msgstr "" -"In the event that you need to restart any cluster node, you can do so. When " -"the database server comes back it, it establishes connectivity with the " -"Primary Component and updates itself to any changes it may have missed while " -"down." - -msgid "" -"In theory, you can run the Block Storage service as active/active. However, " -"because of sufficient concerns, we recommend running the volume component as " -"active/passive only." -msgstr "" -"In theory, you can run the Block Storage service as active/active. However, " -"because of sufficient concerns, we recommend running the volume component as " -"active/passive only." - -msgid "" -"In this configuration, each service runs in a dedicated cluster of 3 or more " -"nodes." -msgstr "" -"In this configuration, each service runs in a dedicated cluster of 3 or more " -"nodes." - -msgid "" -"Individual cluster nodes can stop and be restarted without issue. When a " -"database loses its connection or restarts, the Galera Cluster brings it back " -"into sync once it reestablishes connection with the Primary Component. In " -"the event that you need to restart the entire cluster, identify the most " -"advanced cluster node and initialize the Primary Component on that node." -msgstr "" -"Individual cluster nodes can stop and be restarted without issue. When a " -"database loses its connection or restarts, the Galera Cluster brings it back " -"into sync once it re-establishes connection with the Primary Component. In " -"the event that you need to restart the entire cluster, identify the most " -"advanced cluster node and initialise the Primary Component on that node." - -msgid "" -"Initialize the Primary Component on one cluster node. For servers that use " -"``init``, run the following command:" -msgstr "" -"Initialise the Primary Component on one cluster node. 
For servers that use " -"``init``, run the following command:" - -msgid "Initializing the cluster" -msgstr "Initialising the cluster" - -msgid "Install RabbitMQ" -msgstr "Install RabbitMQ" - -msgid "Install packages" -msgstr "Install packages" - -msgid "Installing Memcached" -msgstr "Installing Memcached" - -msgid "Installing the operating system" -msgstr "Installing the operating system" - -msgid "Introduction to OpenStack high availability" -msgstr "Introduction to OpenStack High Availability" - -msgid "" -"It is also possible to follow a segregated approach for one or more " -"components that are expected to be a bottleneck and use a collapsed approach " -"for the remainder." -msgstr "" -"It is also possible to follow a segregated approach for one or more " -"components that are expected to be a bottleneck and use a collapsed approach " -"for the remainder." - -msgid "" -"It is possible to add controllers to such an environment to convert it into " -"a truly highly available environment." -msgstr "" -"It is possible to add controllers to such an environment to convert it into " -"a truly highly available environment." - -msgid "" -"It is possible to deploy three different flavors of the Pacemaker " -"architecture. The two extremes are ``Collapsed`` (where every component runs " -"on every node) and ``Segregated`` (where every component runs in its own 3+ " -"node cluster)." -msgstr "" -"It is possible to deploy three different flavours of the Pacemaker " -"architecture. The two extremes are ``Collapsed`` (where every component runs " -"on every node) and ``Segregated`` (where every component runs in its own 3+ " -"node cluster)." - -msgid "" -"It is storage and application-agnostic, and in no way specific to OpenStack." -msgstr "" -"It is storage and application-agnostic, and in no way specific to OpenStack." - -msgid "" -"It is very important that all members of the system share the same view of " -"who their peers are and whether or not they are in the majority. Failure to " -"do this leads very quickly to an internal `split-brain `_ state. This is where different parts of " -"the system are pulling in different and incompatible directions." -msgstr "" -"It is very important that all members of the system share the same view of " -"who their peers are and whether or not they are in the majority. Failure to " -"do this leads very quickly to an internal `split-brain `_ state. This is where different parts of " -"the system are pulling in different and incompatible directions." - -msgid "List the nodes known to the quorum service" -msgstr "List the nodes known to the quorum service" - -msgid "Load distribution" -msgstr "Load distribution" - -msgid "" -"Locate your HAProxy instance on each OpenStack controller node in your " -"environment. The following is an example ``/etc/haproxy/haproxy.cfg`` " -"configuration file. Configure your instance using the following " -"configuration file, you will need a copy of it on each controller node." -msgstr "" -"Locate your HAProxy instance on each OpenStack controller node in your " -"environment. The following is an example ``/etc/haproxy/haproxy.cfg`` " -"configuration file. Configure your instance using the following " -"configuration file, you will need a copy of it on each controller node." 
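The ``/etc/haproxy/haproxy.cfg`` file mentioned above usually opens with ``global`` and ``defaults`` sections before the per-service ``listen`` stanzas. The values below are a minimal sketch rather than tuned settings:

.. code-block:: none

   global
     chroot  /var/lib/haproxy
     daemon
     user    haproxy
     group   haproxy
     log     /dev/log local0
     maxconn 4000
     pidfile /var/run/haproxy.pid

   defaults
     log     global
     mode    tcp
     option  tcpka
     retries 3
     timeout connect 10s
     timeout client  1m
     timeout server  1m
     timeout check   10s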
- -msgid "" -"Log in to the database client and grant the ``clustercheck`` user " -"``PROCESS`` privileges:" -msgstr "" -"Log in to the database client and grant the ``clustercheck`` user " -"``PROCESS`` privileges:" - -msgid "" -"Maintains a redundant instance that can be brought online when the active " -"service fails. For example, OpenStack writes to the main database while " -"maintaining a disaster recovery database that can be brought online if the " -"main database fails." -msgstr "" -"Maintains a redundant instance that can be brought online when the active " -"service fails. For example, OpenStack writes to the main database while " -"maintaining a disaster recovery database that can be brought online if the " -"main database fails." - -msgid "Make sure `pcs` is running and configured to start at boot time:" -msgstr "Make sure `pcs` is running and configured to start at boot time:" - -msgid "" -"Make sure to save the changes once you are done. This will vary depending on " -"your distribution:" -msgstr "" -"Make sure to save the changes once you are done. This will vary depending on " -"your distribution:" - -msgid "" -"Making the Block Storage (cinder) API service highly available in active/" -"active mode involves:" -msgstr "" -"Making the Block Storage (Cinder) API service highly available in active/" -"active mode involves:" - -msgid "" -"Making the Block Storage API service highly available in active/passive mode " -"involves:" -msgstr "" -"Making the Block Storage API service highly available in active/passive mode " -"involves:" - -msgid "" -"Making the OpenStack Identity service highly available in active and passive " -"mode involves:" -msgstr "" -"Making the OpenStack Identity service highly available in active and passive " -"mode involves:" - -msgid "" -"Making the RabbitMQ service highly available involves the following steps:" -msgstr "" -"Making the RabbitMQ service highly available involves the following steps:" - -msgid "" -"Making the Shared File Systems (manila) API service highly available in " -"active/passive mode involves:" -msgstr "" -"Making the Shared File Systems (Manila) API service highly available in " -"active/passive mode involves:" - -msgid "Management" -msgstr "Management" - -msgid "" -"Managing the Block Storage API daemon with the Pacemaker cluster manager" -msgstr "" -"Managing the Block Storage API daemon with the Pacemaker cluster manager" - -msgid "" -"Many services can act in an active/active capacity, however, they usually " -"require an external mechanism for distributing requests to one of the " -"available instances. The proxy server can serve this role." -msgstr "" -"Many services can act in an active/active capacity, however, they usually " -"require an external mechanism for distributing requests to one of the " -"available instances. The proxy server can serve this role." - -msgid "Maximum number of network nodes to use for the HA router." -msgstr "Maximum number of network nodes to use for the HA router." - -msgid "" -"Maximum retries with trying to connect to RabbitMQ (infinite by default):" -msgstr "" -"Maximum retries with trying to connect to RabbitMQ (infinite by default):" - -msgid "Memcached" -msgstr "Memcached" - -msgid "" -"Memcached is a general-purpose distributed memory caching system. It is used " -"to speed up dynamic database-driven websites by caching data and objects in " -"RAM to reduce the number of times an external data source must be read." 
-msgstr "" -"Memcached is a general-purpose distributed memory caching system. It is used " -"to speed up dynamic database-driven websites by caching data and objects in " -"RAM to reduce the number of times an external data source must be read." - -msgid "" -"Memcached is a memory cache demon that can be used by most OpenStack " -"services to store ephemeral data, such as tokens." -msgstr "" -"Memcached is a memory cache demon that can be used by most OpenStack " -"services to store ephemeral data, such as tokens." - -msgid "" -"Memcached uses a timeout value, which should always be set to a value that " -"is higher than the heartbeat value set for Telemetry." -msgstr "" -"Memcached uses a timeout value, which should always be set to a value that " -"is higher than the heartbeat value set for Telemetry." - -msgid "Memory" -msgstr "Memory" - -msgid "" -"Memory caching is managed by `oslo.cache `_. This " -"ensures consistency across all projects when using multiple Memcached " -"servers. The following is an example configuration with three hosts:" -msgstr "" -"Memory caching is managed by `oslo.cache `_. This " -"ensures consistency across all projects when using multiple Memcached " -"servers. The following is an example configuration with three hosts:" - -msgid "Messaging service for high availability" -msgstr "Messaging service for High Availability" - -msgid "" -"Minimum number of network nodes to use for the HA router. A new router can " -"be created only if this number of network nodes are available." -msgstr "" -"Minimum number of network nodes to use for the HA router. A new router can " -"be created only if this number of network nodes are available." - -msgid "" -"Mirrored queues in RabbitMQ improve the availability of service since it is " -"resilient to failures." -msgstr "" -"Mirrored queues in RabbitMQ improve the availability of service since it is " -"resilient to failures." - -msgid "Mixed" -msgstr "Mixed" - -msgid "MongoDB" -msgstr "MongoDB" - -msgid "" -"More details are available in the `user story `_ co-" -"authored by OpenStack's HA community and `Product Working Group `_ (PWG), where this feature is " -"identified as missing functionality in OpenStack, which should be addressed " -"with high priority." -msgstr "" -"More details are available in the `user story `_ co-" -"authored by OpenStack's HA community and `Product Working Group `_ (PWG), where this feature is " -"identified as missing functionality in OpenStack, which should be addressed " -"with high priority." - -msgid "More information is available in the RabbitMQ documentation:" -msgstr "More information is available in the RabbitMQ documentation:" - -msgid "" -"Most OpenStack services can use Memcached to store ephemeral data such as " -"tokens. Although Memcached does not support typical forms of redundancy such " -"as clustering, OpenStack services can use almost any number of instances by " -"configuring multiple hostnames or IP addresses." -msgstr "" -"Most OpenStack services can use Memcached to store ephemeral data such as " -"tokens. Although Memcached does not support typical forms of redundancy such " -"as clustering, OpenStack services can use almost any number of instances by " -"configuring multiple hostnames or IP addresses." - -msgid "" -"Most distributions ship an example configuration file (:file:`corosync.conf." -"example`) as part of the documentation bundled with the Corosync package. 
An " -"example Corosync configuration file is shown below:" -msgstr "" -"Most distributions ship an example configuration file (:file:`corosync.conf." -"example`) as part of the documentation bundled with the Corosync package. An " -"example Corosync configuration file is shown below:" - -msgid "" -"Most high availability systems fail in the event of multiple independent " -"(non-consequential) failures. In this case, most implementations favor " -"protecting data over maintaining availability." -msgstr "" -"Most High Availability systems fail in the event of multiple independent " -"(non-consequential) failures. In this case, most implementations favour " -"protecting data over maintaining availability." - -msgid "" -"Most high availability systems guarantee protection against system downtime " -"and data loss only in the event of a single failure. However, they are also " -"expected to protect against cascading failures, where a single failure " -"deteriorates into a series of consequential failures. Many service providers " -"guarantee a :term:`Service Level Agreement (SLA)` including uptime " -"percentage of computing service, which is calculated based on the available " -"time and system downtime excluding planned outage time." -msgstr "" -"Most High Availability systems guarantee protection against system downtime " -"and data loss only in the event of a single failure. However, they are also " -"expected to protect against cascading failures, where a single failure " -"deteriorates into a series of consequential failures. Many service providers " -"guarantee a :term:`Service Level Agreement (SLA)` including uptime " -"percentage of computing service, which is calculated based on the available " -"time and system downtime excluding planned outage time." - -msgid "" -"Multicast groups (``mcastaddr``) must not be reused across cluster " -"boundaries. No two distinct clusters should ever use the same multicast " -"group. Be sure to select multicast addresses compliant with `RFC 2365, " -"\"Administratively Scoped IP Multicast\" `_." -msgstr "" -"Multicast groups (``mcastaddr``) must not be reused across cluster " -"boundaries. No two distinct clusters should ever use the same multicast " -"group. Be sure to select multicast addresses compliant with `RFC 2365, " -"\"Administratively Scoped IP Multicast\" `_." - -msgid "" -"MySQL databases, including MariaDB and Percona XtraDB, manage their " -"configurations using a ``my.cnf`` file, which is typically located in the ``/" -"etc`` directory. Configuration options available in these databases are also " -"available in Galera Cluster, with some restrictions and several additions." -msgstr "" -"MySQL databases, including MariaDB and Percona XtraDB, manage their " -"configurations using a ``my.cnf`` file, which is typically located in the ``/" -"etc`` directory. Configuration options available in these databases are also " -"available in Galera Cluster, with some restrictions and several additions." 
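Pulling the ``my.cnf`` guidance above together, a sketch of the relevant ``[mysqld]`` settings might look like the following. The library path, cluster name, and node addresses are placeholders; the buffer pool value simply illustrates scaling the 128 MB default back by roughly 5%:

.. code-block:: ini

   [mysqld]
   # row-level replication and InnoDB only, as Galera requires
   binlog_format = ROW
   default_storage_engine = InnoDB
   # interleaved auto-increment locking
   innodb_autoinc_lock_mode = 2
   # write the log buffer once per second instead of on every commit
   innodb_flush_log_at_trx_commit = 0
   innodb_buffer_pool_size = 122M

   wsrep_provider = /usr/lib/galera/libgalera_smm.so
   wsrep_cluster_name = "my_openstack_cluster"
   wsrep_cluster_address = "gcomm://10.0.0.12,10.0.0.13,10.0.0.14"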
- -msgid "NIC" -msgstr "NIC" - -msgid "Network components, such as switches and routers" -msgstr "Network components, such as switches and routers" - -msgid "Networking L2 agent" -msgstr "Networking L2 agent" - -msgid "No firewalls between the hosts" -msgstr "No firewalls between the hosts" - -msgid "Node type" -msgstr "Node type" - -msgid "Note the following about the recommended interface configuration:" -msgstr "Note the following about the recommended interface configuration:" - -msgid "Note the following:" -msgstr "Note the following:" - -msgid "" -"Older versions of some distributions, which do not have an up-to-date policy " -"for securing Galera, may also require SELinux to be more relaxed about " -"database access and actions:" -msgstr "" -"Older versions of some distributions, which do not have an up-to-date policy " -"for securing Galera, may also require SELinux to be more relaxed about " -"database access and actions:" - -msgid "On CentOS, RHEL, openSUSE, and SLES:" -msgstr "On CentOS, RHEL, openSUSE, and SLES:" - -msgid "" -"On RHEL-based systems, create resources for cinder's systemd agents and " -"create constraints to enforce startup/shutdown ordering:" -msgstr "" -"On RHEL-based systems, create resources for cinder's systemd agents and " -"create constraints to enforce start-up/shutdown ordering:" - -msgid "" -"On ``3306``, Galera Cluster uses TCP for database client connections and " -"State Snapshot Transfers methods that require the client, (that is, " -"``mysqldump``)." -msgstr "" -"On ``3306``, Galera Cluster uses TCP for database client connections and " -"State Snapshot Transfers methods that require the client, (that is, " -"``mysqldump``)." - -msgid "" -"On ``4444``, Galera Cluster uses TCP for all other State Snapshot Transfer " -"methods." -msgstr "" -"On ``4444``, Galera Cluster uses TCP for all other State Snapshot Transfer " -"methods." - -msgid "" -"On ``4567``, Galera Cluster uses TCP for replication traffic. Multicast " -"replication uses both TCP and UDP on this port." -msgstr "" -"On ``4567``, Galera Cluster uses TCP for replication traffic. Multicast " -"replication uses both TCP and UDP on this port." - -msgid "On ``4568``, Galera Cluster uses TCP for Incremental State Transfers." -msgstr "On ``4568``, Galera Cluster uses TCP for Incremental State Transfers." - -msgid "" -"On any host that is meant to be part of a Pacemaker cluster, establish " -"cluster communications through the Corosync messaging layer. This involves " -"installing the following packages (and their dependencies, which your " -"package manager usually installs automatically):" -msgstr "" -"On any host that is meant to be part of a Pacemaker cluster, establish " -"cluster communications through the Corosync messaging layer. This involves " -"installing the following packages (and their dependencies, which your " -"package manager usually installs automatically):" - -msgid "" -"On each target node, verify the correct owner, group, and permissions of the " -"file :file:`erlang.cookie`:" -msgstr "" -"On each target node, verify the correct owner, group, and permissions of the " -"file :file:`erlang.cookie`:" - -msgid "" -"On the infrastructure layer, the SLA is the time for which RabbitMQ cluster " -"reassembles. Several cases are possible. The Mnesia keeper node is the " -"master of the corresponding Pacemaker resource for RabbitMQ. When it fails, " -"the result is a full AMQP cluster downtime interval. Normally, its SLA is no " -"more than several minutes. 
Failure of another node that is a slave of the " -"corresponding Pacemaker resource for RabbitMQ results in no AMQP cluster " -"downtime at all." -msgstr "" -"On the infrastructure layer, the SLA is the time for which RabbitMQ cluster " -"reassembles. Several cases are possible. The Mnesia keeper node is the " -"master of the corresponding Pacemaker resource for RabbitMQ. When it fails, " -"the result is a full AMQP cluster downtime interval. Normally, its SLA is no " -"more than several minutes. Failure of another node that is a slave of the " -"corresponding Pacemaker resource for RabbitMQ results in no AMQP cluster " -"downtime at all." - -msgid "" -"Once completed, commit your configuration changes by entering :command:" -"`commit` from the :command:`crm configure` menu. Pacemaker then starts the " -"Block Storage API service and its dependent resources on one of your nodes." -msgstr "" -"Once completed, commit your configuration changes by entering :command:" -"`commit` from the :command:`crm configure` menu. Pacemaker then starts the " -"Block Storage API service and its dependent resources on one of your nodes." - -msgid "" -"Once created, synchronize the :file:`corosync.conf` file (and the :file:" -"`authkey` file if the secauth option is enabled) across all cluster nodes." -msgstr "" -"Once created, synchronise the :file:`corosync.conf` file (and the :file:" -"`authkey` file if the secauth option is enabled) across all cluster nodes." - -msgid "" -"Once the database server starts, check the cluster status using the " -"``wsrep_cluster_size`` status variable. From the database client, run the " -"following command:" -msgstr "" -"Once the database server starts, check the cluster status using the " -"``wsrep_cluster_size`` status variable. From the database client, run the " -"following command:" - -msgid "" -"One physical server can support multiple nodes, each of which supports " -"almost any number of network interfaces." -msgstr "" -"One physical server can support multiple nodes, each of which supports " -"almost any number of network interfaces." - -msgid "" -"Only one instance for the central and compute agent service(s) is able to " -"run and function correctly if the ``backend_url`` option is not set." -msgstr "" -"Only one instance for the central and compute agent service(s) is able to " -"run and function correctly if the ``backend_url`` option is not set." - -msgid "" -"OpenStack APIs: APIs that are HTTP(s) stateless services written in python, " -"easy to duplicate and mostly easy to load balance." -msgstr "" -"OpenStack APIs: APIs that are HTTP(s) stateless services written in python, " -"easy to duplicate and mostly easy to load balance." - -msgid "OpenStack Block Storage" -msgstr "OpenStack Block Storage" - -msgid "OpenStack Compute" -msgstr "OpenStack Compute" - -msgid "OpenStack High Availability Guide" -msgstr "OpenStack High Availability Guide" - -msgid "OpenStack Networking" -msgstr "OpenStack Networking" - -msgid "" -"OpenStack currently meets such availability requirements for its own " -"infrastructure services, meaning that an uptime of 99.99% is feasible for " -"the OpenStack infrastructure proper. However, OpenStack does not guarantee " -"99.99% availability for individual guest instances." -msgstr "" -"OpenStack currently meets such availability requirements for its own " -"infrastructure services, meaning that an uptime of 99.99% is feasible for " -"the OpenStack infrastructure proper. 
However, OpenStack does not guarantee " -"99.99% availability for individual guest instances." - -msgid "" -"OpenStack does not require a significant amount of resources and the " -"following minimum requirements should support a proof-of-concept high " -"availability environment with core services and several instances:" -msgstr "" -"OpenStack does not require a significant amount of resources and the " -"following minimum requirements should support a proof-of-concept high " -"availability environment with core services and several instances:" - -msgid "" -"OpenStack is a set of services exposed to the end users as HTTP(s) APIs. " -"Additionally, for your own internal usage, OpenStack requires an SQL " -"database server and AMQP broker. The physical servers, where all the " -"components are running, are called controllers. This modular OpenStack " -"architecture allows you to duplicate all the components and run them on " -"different controllers. By making all the components redundant, it is " -"possible to make OpenStack highly available." -msgstr "" -"OpenStack is a set of services exposed to the end users as HTTP(s) APIs. " -"Additionally, for your own internal usage, OpenStack requires an SQL " -"database server and AMQP broker. The physical servers, where all the " -"components are running, are called controllers. This modular OpenStack " -"architecture allows you to duplicate all the components and run them on " -"different controllers. By making all the components redundant, it is " -"possible to make OpenStack highly available." - -msgid "OpenStack network nodes contain:" -msgstr "OpenStack network nodes contain:" - -msgid "" -"OpenStack services are configured with the list of these IP addresses so " -"they can select one of the addresses from those available." -msgstr "" -"OpenStack services are configured with the list of these IP addresses so " -"they can select one of the addresses from those available." - -msgid "" -"OpenStack supports a single-controller high availability mode that is " -"managed by the services that manage highly available environments but is not " -"actually highly available because no redundant controllers are configured to " -"use for failover. This environment can be used for study and demonstration " -"but is not appropriate for a production environment." -msgstr "" -"OpenStack supports a single-controller High Availability mode that is " -"managed by the services that manage highly available environments but is not " -"actually highly available because no redundant controllers are configured to " -"use for failover. This environment can be used for study and demonstration " -"but is not appropriate for a production environment." - -msgid "Overview of highly available controllers" -msgstr "Overview of highly available controllers" - -msgid "Pacemaker cluster stack" -msgstr "Pacemaker cluster stack" - -msgid "" -"Pacemaker does not inherently understand the applications it manages. " -"Instead, it relies on resource agents (RAs) that are scripts that " -"encapsulate the knowledge of how to start, stop, and check the health of " -"each application managed by the cluster." -msgstr "" -"Pacemaker does not inherently understand the applications it manages. " -"Instead, it relies on resource agents (RAs) that are scripts that " -"encapsulate the knowledge of how to start, stop, and check the health of " -"each application managed by the cluster." 
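The resource agent model in the entry above is easiest to see with a concrete primitive. The following is an illustrative sketch only (not part of the original guide), assuming the ``crmsh`` shell and the stock ``ocf:heartbeat:IPaddr2`` agent; the address is the example VIP ``10.0.0.11`` used elsewhere in this guide:

.. code-block:: console

   # crm configure primitive vip ocf:heartbeat:IPaddr2 \
       params ip="10.0.0.11" cidr_netmask="24" \
       op monitor interval="30s"

Pacemaker then drives the agent's start, stop, and monitor actions on behalf of the cluster; the managed application itself needs no cluster awareness.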
- -msgid "" -"Pacemaker now starts the OpenStack Identity service and its dependent " -"resources on all of your nodes." -msgstr "" -"Pacemaker now starts the OpenStack Identity service and its dependent " -"resources on all of your nodes." - -msgid "" -"Pacemaker now starts the Shared File Systems API service and its dependent " -"resources on one of your nodes." -msgstr "" -"Pacemaker now starts the Shared File Systems API service and its dependent " -"resources on one of your nodes." - -msgid "" -"Pacemaker relies on the `Corosync `_ " -"messaging layer for reliable cluster communications. Corosync implements the " -"Totem single-ring ordering and membership protocol. It also provides UDP and " -"InfiniBand based messaging, quorum, and cluster membership to Pacemaker." -msgstr "" -"Pacemaker relies on the `Corosync `_ " -"messaging layer for reliable cluster communications. Corosync implements the " -"Totem single-ring ordering and membership protocol. It also provides UDP and " -"InfiniBand based messaging, quorum, and cluster membership to Pacemaker." - -msgid "" -"Pacemaker ships with a large set of OCF agents (such as those managing MySQL " -"databases, virtual IP addresses, and RabbitMQ), but can also use any agents " -"already installed on your system and can be extended with your own (see the " -"`developer guide `_)." -msgstr "" -"Pacemaker ships with a large set of OCF agents (such as those managing MySQL " -"databases, virtual IP addresses, and RabbitMQ), but can also use any agents " -"already installed on your system and can be extended with your own (see the " -"`developer guide `_)." - -msgid "" -"Pacemaker then starts the OpenStack Image API service and its dependent " -"resources on one of your nodes." -msgstr "" -"Pacemaker then starts the OpenStack Image API service and its dependent " -"resources on one of your nodes." - -msgid "" -"Pacemaker uses an event-driven approach to cluster state processing. The " -"``cluster-recheck-interval`` parameter (which defaults to 15 minutes) " -"defines the interval at which certain Pacemaker actions occur. It is usually " -"prudent to reduce this to a shorter interval, such as 5 or 3 minutes." -msgstr "" -"Pacemaker uses an event-driven approach to cluster state processing. The " -"``cluster-recheck-interval`` parameter (which defaults to 15 minutes) " -"defines the interval at which certain Pacemaker actions occur. It is usually " -"prudent to reduce this to a shorter interval, such as 5 or 3 minutes." - -msgid "Parameter" -msgstr "Parameter" - -msgid "" -"Persistent block storage can survive instance termination and can also be " -"moved across instances like any external storage device. Cinder also has " -"volume snapshots capability for backing up the volumes." -msgstr "" -"Persistent block storage can survive instance termination and can also be " -"moved across instances like any external storage device. Cinder also has " -"volume snapshots capability for backing up the volumes." - -msgid "" -"Persistent storage exists outside all instances. Two types of persistent " -"storage are provided:" -msgstr "" -"Persistent storage exists outside all instances. 
Two types of persistent " -"storage are provided:" - -msgid "Possible options are:" -msgstr "Possible options are:" - -msgid "Prerequisites" -msgstr "Prerequisites" - -msgid "Processor Cores" -msgstr "Processor Cores" - -msgid "" -"Production servers should run (at least) three RabbitMQ servers for testing " -"and demonstration purposes, however it is possible to run only two servers. " -"In this section, we configure two nodes, called ``rabbit1`` and ``rabbit2``. " -"To build a broker, ensure that all nodes have the same Erlang cookie file." -msgstr "" -"Production servers should run (at least) three RabbitMQ servers for testing " -"and demonstration purposes, however it is possible to run only two servers. " -"In this section, we configure two nodes, called ``rabbit1`` and ``rabbit2``. " -"To build a broker, ensure that all nodes have the same Erlang cookie file." - -msgid "" -"Provider networks: See the *Overview -> Networking Option 1: Provider " -"networks* section of the `Install Guides `_ depending on your distribution." -msgstr "" -"Provider networks: See the *Overview -> Networking Option 1: Provider " -"networks* section of the `Install Guides `_ depending on your distribution." - -msgid "Proxy server" -msgstr "Proxy server" - -msgid "Query the quorum status" -msgstr "Query the quorum status" - -msgid "" -"Quorum becomes important when a failure causes the cluster to split in two " -"or more partitions. In this situation, you want the majority members of the " -"system to ensure the minority are truly dead (through fencing) and continue " -"to host resources. For a two-node cluster, no side has the majority and you " -"can end up in a situation where both sides fence each other, or both sides " -"are running the same services. This can lead to data corruption." -msgstr "" -"Quorum becomes important when a failure causes the cluster to split in two " -"or more partitions. In this situation, you want the majority members of the " -"system to ensure the minority are truly dead (through fencing) and continue " -"to host resources. For a two-node cluster, no side has the majority and you " -"can end up in a situation where both sides fence each other, or both sides " -"are running the same services. This can lead to data corruption." - -msgid "RAID drives" -msgstr "RAID drives" - -msgid "RabbitMQ" -msgstr "RabbitMQ" - -msgid "" -"RabbitMQ HA cluster Transport URL using ``[user:pass@]host:port`` format:" -msgstr "" -"RabbitMQ HA cluster Transport URL using ``[user:pass@]host:port`` format:" - -msgid "" -"RabbitMQ nodes fail over on the application and the infrastructure layers." -msgstr "" -"RabbitMQ nodes fail over on the application and the infrastructure layers." - -msgid "Receive notifications of quorum state changes" -msgstr "Receive notifications of quorum state changes" - -msgid "Recommended for testing." -msgstr "Recommended for testing." - -msgid "Recommended solution by the Tooz project." -msgstr "Recommended solution by the Tooz project." - -msgid "Red Hat" -msgstr "Red Hat" - -msgid "Redundancy and failover" -msgstr "Redundancy and failover" - -msgid "" -"Regardless of which flavor you choose, we recommend that clusters contain at " -"least three nodes so that you can take advantage of `quorum `_." -msgstr "" -"Regardless of which flavour you choose, we recommend that clusters contain " -"at least three nodes so that you can take advantage of `quorum `_." - -msgid "" -"Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage " -"database." 
-msgstr "" -"Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage " -"database." - -msgid "" -"Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage " -"database. Replace ``CINDER_PASS`` with the password you chose for the " -"``cinder`` user in the Identity service." -msgstr "" -"Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage " -"database. Replace ``CINDER_PASS`` with the password you chose for the " -"``cinder`` user in the Identity service." - -msgid "" -"Replace ``RABBIT_USER`` with RabbitMQ username and ``RABBIT_PASS`` with " -"password for respective RabbitMQ host. For more information, see `oslo " -"messaging transport `_." -msgstr "" -"Replace ``RABBIT_USER`` with RabbitMQ username and ``RABBIT_PASS`` with " -"password for respective RabbitMQ host. For more information, see `oslo " -"messaging transport `_." - -msgid "" -"Replace the IP addresses given here with comma-separated list of each " -"OpenStack database in your cluster." -msgstr "" -"Replace the IP addresses given here with comma-separated list of each " -"OpenStack database in your cluster." - -msgid "" -"Restart AppArmor. For servers that use ``init``, run the following command:" -msgstr "" -"Restart AppArmor. For servers that use ``init``, run the following command:" - -msgid "Restart the HAProxy service." -msgstr "Restart the HAProxy service." - -msgid "Restart the host or, to make changes work immediately, invoke:" -msgstr "Restart the host or, to make changes work immediately, invoke:" - -msgid "Restarting the cluster" -msgstr "Restarting the cluster" - -msgid "Retry connecting with RabbitMQ:" -msgstr "Retry connecting with RabbitMQ:" - -msgid "Run Networking DHCP agent" -msgstr "Run Networking DHCP agent" - -msgid "Run Networking L3 agent" -msgstr "Run Networking L3 agent" - -msgid "Run the following commands on each node except the first one:" -msgstr "Run the following commands on each node except the first one:" - -msgid "" -"Run the following commands to download the OpenStack Identity resource to " -"Pacemaker:" -msgstr "" -"Run the following commands to download the OpenStack Identity resource to " -"Pacemaker:" - -msgid "SELinux" -msgstr "SELinux" - -msgid "SELinux and AppArmor set to permit access to ``mysqld``" -msgstr "SELinux and AppArmor set to permit access to ``mysqld``" - -msgid "SUSE" -msgstr "SUSE" - -msgid "" -"SUSE Enterprise Linux and SUSE-based distributions, such as openSUSE, use a " -"set of OCF agents for controlling OpenStack services." -msgstr "" -"SUSE Enterprise Linux and SUSE-based distributions, such as openSUSE, use a " -"set of OCF agents for controlling OpenStack services." - -msgid "" -"Security-Enhanced Linux is a kernel module for improving security on Linux " -"operating systems. It is commonly enabled and configured by default on Red " -"Hat-based distributions. In the context of Galera Cluster, systems with " -"SELinux may block the database service, keep it from starting, or prevent it " -"from establishing network connections with the cluster." -msgstr "" -"Security-Enhanced Linux is a kernel module for improving security on Linux " -"operating systems. It is commonly enabled and configured by default on Red " -"Hat-based distributions. In the context of Galera Cluster, systems with " -"SELinux may block the database service, keep it from starting, or prevent it " -"from establishing network connections with the cluster." 
- -msgid "Segregated" -msgstr "Segregated" - -msgid "" -"Self-service networks: See the *Overview -> Networking Option 2: Self-" -"service networks* section of the `Install Guides `_ depending on your distribution." -msgstr "" -"Self-service networks: See the *Overview -> Networking Option 2: Self-" -"service networks* section of the `Install Guides `_ depending on your distribution." - -msgid "" -"Services like RabbitMQ and Galera have complicated boot-up sequences that " -"require co-ordination, and often serialization, of startup operations across " -"all machines in the cluster. This is especially true after a site-wide " -"failure or shutdown where you must first determine the last machine to be " -"active." -msgstr "" -"Services like RabbitMQ and Galera have complicated boot-up sequences that " -"require co-ordination, and often serialisation, of start-up operations " -"across all machines in the cluster. This is especially true after a site-" -"wide failure or shutdown where you must first determine the last machine to " -"be active." - -msgid "Set a password for hacluster user on each host:" -msgstr "Set a password for hacluster user on each host:" - -msgid "Set automatic L3 agent failover for routers" -msgstr "Set automatic L3 agent failover for routers" - -msgid "Set basic cluster properties" -msgstr "Set basic cluster properties" - -msgid "Set up Corosync with multicast" -msgstr "Set up Corosync with multicast" - -msgid "Set up Corosync with unicast" -msgstr "Set up Corosync with unicast" - -msgid "Set up Corosync with votequorum library" -msgstr "Set up Corosync with votequorum library" - -msgid "Set up the cluster with `crmsh`" -msgstr "Set up the cluster with `crmsh`" - -msgid "Set up the cluster with pcs" -msgstr "Set up the cluster with pcs" - -msgid "" -"Setting ``last_man_standing`` to 1 enables the Last Man Standing (LMS) " -"feature. By default, it is disabled (set to 0). If a cluster is on the " -"quorum edge (``expected_votes:`` set to 7; ``online nodes:`` set to 4) for " -"longer than the time specified for the ``last_man_standing_window`` " -"parameter, the cluster can recalculate quorum and continue operating even if " -"the next node will be lost. This logic is repeated until the number of " -"online nodes in the cluster reaches 2. In order to allow the cluster to step " -"down from 2 members to only 1, the ``auto_tie_breaker`` parameter needs to " -"be set. We do not recommended this for production environments." -msgstr "" -"Setting ``last_man_standing`` to 1 enables the Last Man Standing (LMS) " -"feature. By default, it is disabled (set to 0). If a cluster is on the " -"quorum edge (``expected_votes:`` set to 7; ``online nodes:`` set to 4) for " -"longer than the time specified for the ``last_man_standing_window`` " -"parameter, the cluster can recalculate quorum and continue operating even if " -"the next node will be lost. This logic is repeated until the number of " -"online nodes in the cluster reaches 2. In order to allow the cluster to step " -"down from 2 members to only 1, the ``auto_tie_breaker`` parameter needs to " -"be set. We do not recommended this for production environments." - -msgid "" -"Setting the ``pe-warn-series-max``, ``pe-input-series-max``, and ``pe-error-" -"series-max`` parameters to 1000 instructs Pacemaker to keep a longer history " -"of the inputs processed and errors and warnings generated by its Policy " -"Engine. This history is useful if you need to troubleshoot the cluster." 
-msgstr "" -"Setting the ``pe-warn-series-max``, ``pe-input-series-max``, and ``pe-error-" -"series-max`` parameters to 1000 instructs Pacemaker to keep a longer history " -"of the inputs processed and errors and warnings generated by its Policy " -"Engine. This history is useful if you need to troubleshoot the cluster." - -msgid "" -"Setting this parameter to ``0`` or ``2`` can improve performance, but it " -"introduces certain dangers. Operating system failures can erase the last " -"second of transactions. While you can recover this data from another node, " -"if the cluster goes down at the same time (in the event of a data center " -"power outage), you lose this data permanently." -msgstr "" -"Setting this parameter to ``0`` or ``2`` can improve performance, but it " -"introduces certain dangers. Operating system failures can erase the last " -"second of transactions. While you can recover this data from another node, " -"if the cluster goes down at the same time (in the event of a data centre " -"power outage), you lose this data permanently." - -msgid "Simplified process for adding/removing of nodes" -msgstr "Simplified process for adding/removing of nodes" - -msgid "" -"Since all API access is directed to the proxy, adding or removing nodes has " -"no impact on the configuration of other services. This can be very useful in " -"upgrade scenarios where an entirely new set of machines can be configured " -"and tested in isolation before telling the proxy to direct traffic there " -"instead." -msgstr "" -"Since all API access is directed to the proxy, adding or removing nodes has " -"no impact on the configuration of other services. This can be very useful in " -"upgrade scenarios where an entirely new set of machines can be configured " -"and tested in isolation before telling the proxy to direct traffic there " -"instead." - -msgid "" -"Since the cluster is a single administrative domain, it is acceptable to use " -"the same password on all nodes." -msgstr "" -"Since the cluster is a single administrative domain, it is acceptable to use " -"the same password on all nodes." - -msgid "Single-controller high availability mode" -msgstr "Single-controller High Availability mode" - -msgid "" -"Specifying ``corosync_votequorum`` enables the votequorum library. This is " -"the only required option." -msgstr "" -"Specifying ``corosync_votequorum`` enables the votequorum library. This is " -"the only required option." - -msgid "Start Corosync" -msgstr "Start Corosync" - -msgid "Start Pacemaker" -msgstr "Start Pacemaker" - -msgid "Start ``corosync`` with systemd unit file:" -msgstr "Start ``corosync`` with systemd unit file:" - -msgid "Start ``corosync`` with the LSB init script:" -msgstr "Start ``corosync`` with the LSB init script:" - -msgid "Start ``corosync`` with upstart:" -msgstr "Start ``corosync`` with upstart:" - -msgid "Start ``pacemaker`` with the LSB init script:" -msgstr "Start ``pacemaker`` with the LSB init script:" - -msgid "Start ``pacemaker`` with the systemd unit file:" -msgstr "Start ``pacemaker`` with the systemd unit file:" - -msgid "Start ``pacemaker`` with upstart:" -msgstr "Start ``pacemaker`` with upstart:" - -msgid "" -"Start the ``xinetd`` daemon for ``clustercheck``. For servers that use " -"``init``, run the following commands:" -msgstr "" -"Start the ``xinetd`` daemon for ``clustercheck``. For servers that use " -"``init``, run the following commands:" - -msgid "" -"Start the database server on all other cluster nodes. 
For servers that use " -"``init``, run the following command:" -msgstr "" -"Start the database server on all other cluster nodes. For servers that use " -"``init``, run the following command:" - -msgid "" -"Start the message queue service on all nodes and configure it to start when " -"the system boots. On Ubuntu, it is configured by default." -msgstr "" -"Start the message queue service on all nodes and configure it to start when " -"the system boots. On Ubuntu, it is configured by default." - -msgid "Stateful service" -msgstr "Stateful service" - -msgid "" -"Stateful services can be configured as active/passive or active/active, " -"which are defined as follows:" -msgstr "" -"Stateful services can be configured as active/passive or active/active, " -"which are defined as follows:" - -msgid "Stateless service" -msgstr "Stateless service" - -msgid "Stateless versus stateful services" -msgstr "Stateless versus stateful services" - -msgid "" -"Stop RabbitMQ and copy the cookie from the first node to each of the other " -"node(s):" -msgstr "" -"Stop RabbitMQ and copy the cookie from the first node to each of the other " -"node(s):" - -msgid "Storage" -msgstr "Storage" - -msgid "Storage back end" -msgstr "Storage back end" - -msgid "Storage components" -msgstr "Storage components" - -msgid "" -"System downtime: Occurs when a user-facing service is unavailable beyond a " -"specified maximum amount of time." -msgstr "" -"System downtime: Occurs when a user-facing service is unavailable beyond a " -"specified maximum amount of time." - -msgid "Telemetry" -msgstr "Telemetry" - -msgid "Telemetry polling agent" -msgstr "Telemetry polling agent" - -msgid "" -"The :command:`crm configure` command supports batch input. Copy and paste " -"the lines in the next step into your live Pacemaker configuration and then " -"make changes as required." -msgstr "" -"The :command:`crm configure` command supports batch input. Copy and paste " -"the lines in the next step into your live Pacemaker configuration and then " -"make changes as required." - -msgid "" -"The :command:`crm configure` supports batch input. Copy and paste the lines " -"in the next step into your live Pacemaker configuration and then make " -"changes as required." -msgstr "" -"The :command:`crm configure` supports batch input. Copy and paste the lines " -"in the next step into your live Pacemaker configuration and then make " -"changes as required." - -msgid "" -"The :command:`crm configure` supports batch input. You may have to copy and " -"paste the above lines into your live Pacemaker configuration, and then make " -"changes as required." -msgstr "" -"The :command:`crm configure` supports batch input. You may have to copy and " -"paste the above lines into your live Pacemaker configuration, and then make " -"changes as required." - -msgid "" -"The Block Storage service (cinder) that can use LVM or Ceph RBD as the " -"storage back end." -msgstr "" -"The Block Storage service (Cinder) that can use LVM or Ceph RBD as the " -"storage back end." - -msgid "" -"The Galera cluster configuration directive ``backup`` indicates that two of " -"the three controllers are standby nodes. This ensures that only one node " -"services write requests because OpenStack support for multi-node writes is " -"not yet production-ready." -msgstr "" -"The Galera cluster configuration directive ``backup`` indicates that two of " -"the three controllers are standby nodes. 
This ensures that only one node " -"services write requests because OpenStack support for multi-node writes is " -"not yet production-ready." - -msgid "" -"The Image service (glance) that can use the Object Storage service (swift) " -"or Ceph RBD as the storage back end." -msgstr "" -"The Image service (Glance) that can use the Object Storage service (Swift) " -"or Ceph RBD as the storage back end." - -msgid "" -"The L2 agent cannot be distributed and highly available. Instead, it must be " -"installed on each data forwarding node to control the virtual network driver " -"such as Open vSwitch or Linux Bridge. One L2 agent runs per node and " -"controls its virtual interfaces." -msgstr "" -"The L2 agent cannot be distributed and Highly Available. Instead, it must be " -"installed on each data forwarding node to control the virtual network driver " -"such as Open vSwitch or Linux Bridge. One L2 agent runs per node and " -"controls its virtual interfaces." - -msgid "" -"The Memcached client implements hashing to balance objects among the " -"instances. Failure of an instance impacts only a percentage of the objects " -"and the client automatically removes it from the list of instances. The SLA " -"is several minutes." -msgstr "" -"The Memcached client implements hashing to balance objects among the " -"instances. Failure of an instance impacts only a percentage of the objects " -"and the client automatically removes it from the list of instances. The SLA " -"is several minutes." - -msgid "" -"The Memcached client implements hashing to balance objects among the " -"instances. Failure of an instance only impacts a percentage of the objects, " -"and the client automatically removes it from the list of instances." -msgstr "" -"The Memcached client implements hashing to balance objects among the " -"instances. Failure of an instance only impacts a percentage of the objects, " -"and the client automatically removes it from the list of instances." - -msgid "" -"The Networking (neutron) service L3 agent is scalable, due to the scheduler " -"that supports Virtual Router Redundancy Protocol (VRRP) to distribute " -"virtual routers across multiple nodes. For more information about the VRRP " -"and keepalived, see `Linux bridge: High availability using VRRP `_ and " -"`Open vSwitch: High availability using VRRP `_." -msgstr "" -"The Networking (Neutron) service L3 agent is scalable, due to the scheduler " -"that supports Virtual Router Redundancy Protocol (VRRP) to distribute " -"virtual routers across multiple nodes. For more information about the VRRP " -"and keepalived, see `Linux bridge: High availability using VRRP `_ and " -"`Open vSwitch: High availability using VRRP `_." - -msgid "" -"The OpenStack HA community used to hold `weekly IRC meetings `_ to discuss a range of topics " -"relating to HA in OpenStack. The `logs of all past meetings `_ are still available to read." -msgstr "" -"The OpenStack HA community used to hold `weekly IRC meetings `_ to discuss a range of topics " -"relating to HA in OpenStack. The `logs of all past meetings `_ are still available to read." - -msgid "" -"The OpenStack HA team is based on voluntary contributions from the OpenStack " -"community. You can contact the HA community directly in the #openstack-ha " -"channel on Freenode IRC, or by sending mail to the openstack-dev mailing " -"list with the [HA] prefix in the subject header." -msgstr "" -"The OpenStack HA team is based on voluntary contributions from the OpenStack " -"community. 
You can contact the HA community directly in the #openstack-ha " -"channel on Freenode IRC, or by sending mail to the openstack-dev mailing " -"list with the [HA] prefix in the subject header." - -msgid "" -"The OpenStack Image service offers a service for discovering, registering, " -"and retrieving virtual machine images. To make the OpenStack Image API " -"service highly available in active/passive mode, you must:" -msgstr "" -"The OpenStack Image service offers a service for discovering, registering, " -"and retrieving virtual machine images. To make the OpenStack Image API " -"service highly available in active/passive mode, you must:" - -msgid "" -"The OpenStack Installation Guides also include a list of the services that " -"use passwords with important notes about using them." -msgstr "" -"The OpenStack Installation Guides also include a list of the services that " -"use passwords with important notes about using them." - -msgid "" -"The OpenStack Networking (neutron) service has a scheduler that lets you run " -"multiple agents across nodes. The DHCP agent can be natively highly " -"available." -msgstr "" -"The OpenStack Networking (Neutron) service has a scheduler that lets you run " -"multiple agents across nodes. The DHCP agent can be natively highly " -"available." - -msgid "The Pacemaker architecture" -msgstr "The Pacemaker architecture" - -msgid "" -"The Pacemaker service also requires an additional configuration file ``/etc/" -"corosync/uidgid.d/pacemaker`` to be created with the following content:" -msgstr "" -"The Pacemaker service also requires an additional configuration file ``/etc/" -"corosync/uidgid.d/pacemaker`` to be created with the following content:" - -msgid "" -"The SQL relational database server provides stateful type consumed by other " -"components. Supported databases are MySQL, MariaDB, and PostgreSQL. Making " -"the SQL database redundant is complex." -msgstr "" -"The SQL relational database server provides stateful type consumed by other " -"components. Supported databases are MySQL, MariaDB and PostgreSQL. Making " -"the SQL database redundant is complex." - -msgid "" -"The Telemetry API service configuration does not have the ``option httpchk`` " -"directive as it cannot process this check properly." -msgstr "" -"The Telemetry API service configuration does not have the ``option httpchk`` " -"directive as it cannot process this check properly." - -msgid "" -"The Telemetry polling agent can be configured to partition its polling " -"workload between multiple agents. This enables high availability (HA)." -msgstr "" -"The Telemetry polling agent can be configured to partition its polling " -"workload between multiple agents. This enables high availability (HA)." - -msgid "" -"The `Installation Guides `_ " -"provide instructions for installing multiple compute nodes. To make the " -"compute nodes highly available, you must configure the environment to " -"include multiple instances of the API and other services." -msgstr "" -"The `Installation Guides `_ " -"provide instructions for installing multiple compute nodes. To make the " -"compute nodes highly available, you must configure the environment to " -"include multiple instances of the API and other services." - -msgid "" -"The `Telemetry service `_ provides a data collection service and an alarming " -"service." -msgstr "" -"The `Telemetry service `_ provides a data collection service and an alarming " -"service." 
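For the Telemetry polling agent partitioning mentioned above, a coordination back end must be configured in addition to enabling workload partitioning. A minimal, illustrative :file:`ceilometer.conf` fragment, assuming a Redis-backed Tooz coordinator on the example controller address (any supported Tooz driver can be substituted):

.. code-block:: ini

   [coordination]
   backend_url = redis://10.0.0.11:6379

   [compute]
   workload_partitioning = True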
- -msgid "" -"The `Tooz `_ library provides the " -"coordination within the groups of service instances. It provides an API " -"above several back ends that can be used for building distributed " -"applications." -msgstr "" -"The `Tooz `_ library provides the " -"coordination within the groups of service instances. It provides an API " -"above several back ends that can be used for building distributed " -"applications." - -msgid "" -"The ``-p`` option is used to give the password on command line and makes it " -"easier to script." -msgstr "" -"The ``-p`` option is used to give the password on command line and makes it " -"easier to script." - -msgid "" -"The ``admin_bind_host`` parameter lets you use a private network for admin " -"access." -msgstr "" -"The ``admin_bind_host`` parameter lets you use a private network for admin " -"access." - -msgid "" -"The ``bindnetaddr`` is the network address of the interfaces to bind to. The " -"example uses two network addresses of /24 IPv4 subnets." -msgstr "" -"The ``bindnetaddr`` is the network address of the interfaces to bind to. The " -"example uses two network addresses of /24 IPv4 subnets." - -msgid "" -"The ``token`` value specifies the time, in milliseconds, during which the " -"Corosync token is expected to be transmitted around the ring. When this " -"timeout expires, the token is declared lost, and after " -"``token_retransmits_before_loss_const lost`` tokens, the non-responding " -"processor (cluster node) is declared dead. ``token × " -"token_retransmits_before_loss_const`` is the maximum time a node is allowed " -"to not respond to cluster messages before being considered dead. The default " -"for token is 1000 milliseconds (1 second), with 4 allowed retransmits. These " -"defaults are intended to minimize failover times, but can cause frequent " -"false alarms and unintended failovers in case of short network " -"interruptions. The values used here are safer, albeit with slightly extended " -"failover times." -msgstr "" -"The ``token`` value specifies the time, in milliseconds, during which the " -"Corosync token is expected to be transmitted around the ring. When this " -"timeout expires, the token is declared lost, and after " -"``token_retransmits_before_loss_const lost`` tokens, the non-responding " -"processor (cluster node) is declared dead. ``token × " -"token_retransmits_before_loss_const`` is the maximum time a node is allowed " -"to not respond to cluster messages before being considered dead. The default " -"for token is 1000 milliseconds (1 second), with 4 allowed retransmits. These " -"defaults are intended to minimise failover times, but can cause frequent " -"false alarms and unintended failovers in case of short network " -"interruptions. The values used here are safer, albeit with slightly extended " -"failover times." - -msgid "" -"The ``transport`` directive controls the transport mechanism. To avoid the " -"use of multicast entirely, specify the ``udpu`` unicast transport parameter. " -"This requires specifying the list of members in the ``nodelist`` directive. " -"This potentially makes up the membership before deployment. The default is " -"``udp``. The transport type can also be set to ``udpu`` or ``iba``." -msgstr "" -"The ``transport`` directive controls the transport mechanism. To avoid the " -"use of multicast entirely, specify the ``udpu`` unicast transport parameter. " -"This requires specifying the list of members in the ``nodelist`` directive. 
" -"This potentially makes up the membership before deployment. The default is " -"``udp``. The transport type can also be set to ``udpu`` or ``iba``." - -msgid "" -"The application layer is controlled by the ``oslo.messaging`` configuration " -"options for multiple AMQP hosts. If the AMQP node fails, the application " -"reconnects to the next one configured within the specified reconnect " -"interval. The specified reconnect interval constitutes its SLA." -msgstr "" -"The application layer is controlled by the ``oslo.messaging`` configuration " -"options for multiple AMQP hosts. If the AMQP node fails, the application " -"reconnects to the next one configured within the specified reconnect " -"interval. The specified reconnect interval constitutes its SLA." - -msgid "" -"The architectural challenges of instance HA and several currently existing " -"solutions were presented in `a talk at the Austin summit `_, for which `slides are also available `_." -msgstr "" -"The architectural challenges of instance HA and several currently existing " -"solutions were presented in `a talk at the Austin summit `_, for which `slides are also available `_." - -msgid "" -"The architectures differ in the sets of services managed by the cluster." -msgstr "" -"The architectures differ in the sets of services managed by the cluster." - -msgid "" -"The availability check of the instances is provided by heartbeat messages. " -"When the connection with an instance is lost, the workload will be " -"reassigned within the remaining instances in the next polling cycle." -msgstr "" -"The availability check of the instances is provided by heartbeat messages. " -"When the connection with an instance is lost, the workload will be " -"reassigned within the remaining instances in the next polling cycle." - -msgid "" -"The benefits of this approach are the physical isolation between components " -"and the ability to add capacity to specific components." -msgstr "" -"The benefits of this approach are the physical isolation between components " -"and the ability to add capacity to specific components." - -msgid "" -"The cloud controller runs on the management network and must talk to all " -"other services." -msgstr "" -"The cloud controller runs on the management network and must talk to all " -"other services." - -msgid "" -"The cluster is fully operational with ``expected_votes`` set to 7 nodes " -"(each node has 1 vote), quorum: 4. If a list of nodes is specified as " -"``nodelist``, the ``expected_votes`` value is ignored." -msgstr "" -"The cluster is fully operational with ``expected_votes`` set to 7 nodes " -"(each node has 1 vote), quorum: 4. If a list of nodes is specified as " -"``nodelist``, the ``expected_votes`` value is ignored." - -msgid "" -"The code for three of these solutions can be found online at the following " -"links:" -msgstr "" -"The code for three of these solutions can be found online at the following " -"links:" - -msgid "" -"The command :command:`crm configure` supports batch input, copy and paste " -"the lines above into your live Pacemaker configuration and then make changes " -"as required. For example, you may enter ``edit p_ip_cinder-api`` from the :" -"command:`crm configure` menu and edit the resource to match your preferred " -"virtual IP address." -msgstr "" -"The command :command:`crm configure` supports batch input, copy and paste " -"the lines above into your live Pacemaker configuration and then make changes " -"as required. 
For example, you may enter ``edit p_ip_cinder-api`` from the :" -"command:`crm configure` menu and edit the resource to match your preferred " -"virtual IP address." - -msgid "" -"The commands for installing RabbitMQ are specific to the Linux distribution " -"you are using." -msgstr "" -"The commands for installing RabbitMQ are specific to the Linux distribution " -"you are using." - -msgid "" -"The correct path to ``libgalera_smm.so`` given to the ``wsrep_provider`` " -"parameter" -msgstr "" -"The correct path to ``libgalera_smm.so`` given to the ``wsrep_provider`` " -"parameter" - -msgid "" -"The default node type is a disc node. In this guide, nodes join the cluster " -"as disc nodes. Also, nodes can join the cluster as RAM nodes. For more " -"details about this feature, check `Clusters with RAM nodes `_." -msgstr "" -"The default node type is a disc node. In this guide, nodes join the cluster " -"as disc nodes. Also, nodes can join the cluster as RAM nodes. For more " -"details about this feature, check `Clusters with RAM nodes `_." - -msgid "" -"The first step in setting up your highly available OpenStack cluster is to " -"install the operating system on each node. Follow the instructions in the " -"*Environment* section of the `Installation Guides `_ depending on your distribution." -msgstr "" -"The first step in setting up your highly available OpenStack cluster is to " -"install the operating system on each node. Follow the instructions in the " -"*Environment* section of the `Installation Guides `_ depending on your distribution." - -msgid "" -"The first step is to install the database that sits at the heart of the " -"cluster. To implement high availability, run an instance of the database on " -"each controller node and use Galera Cluster to provide replication between " -"them. Galera Cluster is a synchronous multi-master database cluster, based " -"on MySQL and the InnoDB storage engine. It is a high-availability service " -"that provides high system uptime, no data loss, and scalability for growth." -msgstr "" -"The first step is to install the database that sits at the heart of the " -"cluster. To implement high availability, run an instance of the database on " -"each controller node and use Galera Cluster to provide replication between " -"them. Galera Cluster is a synchronous multi-master database cluster, based " -"on MySQL and the InnoDB storage engine. It is a high-availability service " -"that provides high system uptime, no data loss, and scalability for growth." - -msgid "The following are the definitions of stateless and stateful services:" -msgstr "The following are the definitions of stateless and stateful services:" - -msgid "The following are the standard hardware requirements:" -msgstr "The following are the standard hardware requirements:" - -msgid "" -"The following components are currently unable to benefit from the use of a " -"proxy server:" -msgstr "" -"The following components are currently unable to benefit from the use of a " -"proxy server:" - -msgid "The following components/services can work with HA queues:" -msgstr "The following components/services can work with HA queues:" - -msgid "" -"The following section(s) detail how to add the OpenStack Identity resource " -"to Pacemaker on SUSE and Red Hat." -msgstr "" -"The following section(s) detail how to add the OpenStack Identity resource " -"to Pacemaker on SUSE and Red Hat." - -msgid "" -"The majority of services, needing no real orchestration, are handled by " -"systemd on each node. 
This approach avoids the need to coordinate service " -"upgrades or location changes with the cluster and has the added advantage of " -"more easily scaling beyond Corosync's 16 node limit. However, it will " -"generally require the addition of an enterprise monitoring solution such as " -"Nagios or Sensu for those wanting centralized failure reporting." -msgstr "" -"The majority of services, needing no real orchestration, are handled by " -"systemd on each node. This approach avoids the need to coordinate service " -"upgrades or location changes with the cluster and has the added advantage of " -"more easily scaling beyond Corosync's 16 node limit. However, it will " -"generally require the addition of an enterprise monitoring solution such as " -"Nagios or Sensu for those wanting centralized failure reporting." - -msgid "" -"The most popular AMQP implementation used in OpenStack installations is " -"RabbitMQ." -msgstr "" -"The most popular AMQP implementation used in OpenStack installations is " -"RabbitMQ." - -msgid "" -"The proxy can be configured as a secondary mechanism for detecting service " -"failures. It can even be configured to look for nodes in a degraded state " -"(such as being too far behind in the replication) and take them out of " -"circulation." -msgstr "" -"The proxy can be configured as a secondary mechanism for detecting service " -"failures. It can even be configured to look for nodes in a degraded state " -"(such as being too far behind in the replication) and take them out of " -"circulation." - -msgid "" -"The quorum specifies the minimal number of nodes that must be functional in " -"a cluster of redundant nodes in order for the cluster to remain functional. " -"When one node fails and failover transfers control to other nodes, the " -"system must ensure that data and processes remain sane. To determine this, " -"the contents of the remaining nodes are compared and, if there are " -"discrepancies, a majority rules algorithm is implemented." -msgstr "" -"The quorum specifies the minimal number of nodes that must be functional in " -"a cluster of redundant nodes in order for the cluster to remain functional. " -"When one node fails and failover transfers control to other nodes, the " -"system must ensure that data and processes remain sane. To determine this, " -"the contents of the remaining nodes are compared and, if there are " -"discrepancies, a majority rules algorithm is implemented." - -msgid "" -"The service declaration for the Pacemaker service may be placed in the :file:" -"`corosync.conf` file directly or in its own separate file, :file:`/etc/" -"corosync/service.d/pacemaker`." -msgstr "" -"The service declaration for the Pacemaker service may be placed in the :file:" -"`corosync.conf` file directly or in its own separate file, :file:`/etc/" -"corosync/service.d/pacemaker`." - -msgid "The steps to implement the Pacemaker cluster stack are:" -msgstr "The steps to implement the Pacemaker cluster stack are:" - -msgid "" -"The votequorum library has been created to replace and eliminate ``qdisk``, " -"the disk-based quorum daemon for CMAN, from advanced cluster configurations." -msgstr "" -"The votequorum library has been created to replace and eliminate ``qdisk``, " -"the disk-based quorum daemon for CMAN, from advanced cluster configurations." - -msgid "" -"The votequorum library is part of the Corosync project. It provides an " -"interface to the vote-based quorum service and it must be explicitly enabled " -"in the Corosync configuration file. 
The main role of votequorum library is " -"to avoid split-brain situations, but it also provides a mechanism to:" -msgstr "" -"The votequorum library is part of the Corosync project. It provides an " -"interface to the vote-based quorum service and it must be explicitly enabled " -"in the Corosync configuration file. The main role of votequorum library is " -"to avoid split-brain situations, but it also provides a mechanism to:" - -msgid "" -"These agents must conform to one of the `OCF `_, `SysV Init " -"`_, Upstart, or Systemd standards." -msgstr "" -"These agents must conform to one of the `OCF `_, `SysV Init " -"`_, Upstart, or Systemd standards." - -msgid "This can be achieved using the :command:`iptables` command:" -msgstr "This can be achieved using the :command:`iptables` command:" - -msgid "" -"This chapter describes the basic environment for high availability, such as " -"hardware, operating system, common services." -msgstr "" -"This chapter describes the basic environment for High Availability, such as " -"hardware, operating system, common services." - -msgid "" -"This chapter describes the shared services for high availability, such as " -"database, messaging service." -msgstr "" -"This chapter describes the shared services for High Availability, such as " -"database, messaging service." - -msgid "" -"This configuration creates ``p_cinder-api``, a resource for managing the " -"Block Storage API service." -msgstr "" -"This configuration creates ``p_cinder-api``, a resource for managing the " -"Block Storage API service." - -msgid "" -"This configuration creates ``p_glance-api``, a resource for managing the " -"OpenStack Image API service." -msgstr "" -"This configuration creates ``p_glance-api``, a resource for managing the " -"OpenStack Image API service." - -msgid "" -"This configuration creates ``p_keystone``, a resource for managing the " -"OpenStack Identity service." -msgstr "" -"This configuration creates ``p_keystone``, a resource for managing the " -"OpenStack Identity service." - -msgid "" -"This configuration creates ``p_manila-api``, a resource for managing the " -"Shared File Systems API service." -msgstr "" -"This configuration creates ``p_manila-api``, a resource for managing the " -"Shared File Systems API service." - -msgid "" -"This configuration creates ``vip``, a virtual IP address for use by the API " -"node (``10.0.0.11``)." -msgstr "" -"This configuration creates ``vip``, a virtual IP address for use by the API " -"node (``10.0.0.11``)." - -msgid "" -"This document discusses some common methods of implementing highly available " -"systems, with an emphasis on the core OpenStack services and other open " -"source services that are closely aligned with OpenStack." -msgstr "" -"This document discusses some common methods of implementing highly available " -"systems, with an emphasis on the core OpenStack services and other open " -"source services that are closely aligned with OpenStack." - -msgid "" -"This example assumes that you are using NFS for the physical storage, which " -"will almost never be true in a production installation." -msgstr "" -"This example assumes that you are using NFS for the physical storage, which " -"will almost never be true in a production installation." - -msgid "" -"This guide describes how to install and configure OpenStack for high " -"availability. It supplements the Installation Guides and assumes that you " -"are familiar with the material in those guides." 
-msgstr "" -"This guide describes how to install and configure OpenStack for High " -"Availability. It supplements the Installation Guides and assumes that you " -"are familiar with the material in those guides." - -msgid "This guide is intended as advice only." -msgstr "This guide is intended as advice only." - -msgid "This guide uses the following example IP addresses:" -msgstr "This guide uses the following example IP addresses:" - -msgid "" -"This guide was last updated as of the Ocata release, documenting the " -"OpenStack Ocata, Newton, and Mitaka releases. It may not apply to EOL " -"releases Kilo and Liberty." -msgstr "" -"This guide was last updated as of the Ocata release, documenting the " -"OpenStack Ocata, Newton, and Mitaka releases. It may not apply to EOL " -"releases Kilo and Liberty." - -msgid "This is the most common option and the one we document here." -msgstr "This is the most common option and the one we document here." - -msgid "" -"This is why setting the quorum to a value less than ``floor(n/2) + 1`` is " -"dangerous. However it may be required for some specific cases, such as a " -"temporary measure at a point it is known with 100% certainty that the other " -"nodes are down." -msgstr "" -"This is why setting the quorum to a value less than ``floor(n/2) + 1`` is " -"dangerous. However it may be required for some specific cases, such as a " -"temporary measure at a point it is known with 100% certainty that the other " -"nodes are down." - -msgid "" -"This scenario can be visualized as below, where each box below represents a " -"cluster of three or more guests." -msgstr "" -"This scenario can be visualised as below, where each box below represents a " -"cluster of three or more guests." - -msgid "This scenario can be visualized as below." -msgstr "This scenario can be visualised as below." - -msgid "" -"This scenario has the advantage of requiring far fewer, if more powerful, " -"machines. Additionally, being part of a single cluster allows you to " -"accurately model the ordering dependencies between components." -msgstr "" -"This scenario has the advantage of requiring far fewer, if more powerful, " -"machines. Additionally, being part of a single cluster allows you to " -"accurately model the ordering dependencies between components." - -msgid "" -"This section discusses ways to protect against data loss in your OpenStack " -"environment." -msgstr "" -"This section discusses ways to protect against data loss in your OpenStack " -"environment." - -msgid "" -"This value increments with each transaction, so the most advanced node has " -"the highest sequence number and therefore is the most up to date." -msgstr "" -"This value increments with each transaction, so the most advanced node has " -"the highest sequence number and therefore is the most up to date." - -msgid "" -"To be sure that all data is highly available, ensure that everything is " -"stored in the MySQL database (which is also highly available):" -msgstr "" -"To be sure that all data is highly available, ensure that everything is " -"stored in the MySQL database (which is also highly available):" - -msgid "" -"To configure AppArmor to work with Galera Cluster, complete the following " -"steps on each cluster node:" -msgstr "" -"To configure AppArmor to work with Galera Cluster, complete the following " -"steps on each cluster node:" - -msgid "" -"To configure SELinux to permit Galera Cluster to operate, you may need to " -"use the ``semanage`` utility to open the ports it uses. 
For example:" -msgstr "" -"To configure SELinux to permit Galera Cluster to operate, you may need to " -"use the ``semanage`` utility to open the ports it uses. For example:" - -msgid "" -"To configure the number of DHCP agents per network, modify the " -"``dhcp_agents_per_network`` parameter in the :file:`/etc/neutron/neutron." -"conf` file. By default this is set to 1. To achieve high availability, " -"assign more than one DHCP agent per network. For more information, see `High-" -"availability for DHCP `_." -msgstr "" -"To configure the number of DHCP agents per network, modify the " -"``dhcp_agents_per_network`` parameter in the :file:`/etc/neutron/neutron." -"conf` file. By default this is set to 1. To achieve high availability, " -"assign more than one DHCP agent per network. For more information, see `High-" -"availability for DHCP `_." - -msgid "" -"To enable high availability for configured routers, edit the :file:`/etc/" -"neutron/neutron.conf` file to set the following values:" -msgstr "" -"To enable High Availability for configured routers, edit the :file:`/etc/" -"neutron/neutron.conf` file to set the following values:" - -msgid "" -"To enable the compute agent to run multiple instances simultaneously with " -"workload partitioning, the ``workload_partitioning`` option must be set to " -"``True`` under the `compute section `_ in the :file:`ceilometer.conf` configuration " -"file." -msgstr "" -"To enable the compute agent to run multiple instances simultaneously with " -"workload partitioning, the ``workload_partitioning`` option must be set to " -"``True`` under the `compute section `_ in the :file:`ceilometer.conf` configuration " -"file." - -msgid "" -"To ensure that all queues except those with auto-generated names are " -"mirrored across all running nodes, set the ``ha-mode`` policy key to all by " -"running the following command on one of the nodes:" -msgstr "" -"To ensure that all queues except those with auto-generated names are " -"mirrored across all running nodes, set the ``ha-mode`` policy key to all by " -"running the following command on one of the nodes:" - -msgid "" -"To find the most advanced cluster node, you need to check the sequence " -"numbers, or the ``seqnos``, on the last committed transaction for each. You " -"can find this by viewing ``grastate.dat`` file in database directory:" -msgstr "" -"To find the most advanced cluster node, you need to check the sequence " -"numbers, or the ``seqnos``, on the last committed transaction for each. You " -"can find this by viewing ``grastate.dat`` file in database directory:" - -msgid "" -"To install and configure Memcached, read the `official documentation " -"`_." -msgstr "" -"To install and configure Memcached, read the `official documentation " -"`_." - -msgid "To start the cluster, complete the following steps:" -msgstr "To start the cluster, complete the following steps:" - -msgid "" -"Tooz supports `various drivers `_ including the following back end solutions:" -msgstr "" -"Tooz supports `various drivers `_ including the following back end solutions:" - -msgid "" -"Traditionally, Pacemaker has been positioned as an all-encompassing " -"solution. However, as OpenStack services have matured, they are increasingly " -"able to run in an active/active configuration and gracefully tolerate the " -"disappearance of the APIs on which they depend." -msgstr "" -"Traditionally, Pacemaker has been positioned as an all-encompassing " -"solution. 
However, as OpenStack services have matured, they are increasingly " -"able to run in an active/active configuration and gracefully tolerate the " -"disappearance of the APIs on which they depend." - -msgid "True" -msgstr "True" - -msgid "" -"Typically, an active/active installation for a stateless service maintains a " -"redundant instance, and requests are load balanced using a virtual IP " -"address and a load balancer such as HAProxy." -msgstr "" -"Typically, an active/active installation for a stateless service maintains a " -"redundant instance, and requests are load balanced using a virtual IP " -"address and a load balancer such as HAProxy." - -msgid "Use HA queues in RabbitMQ (``x-ha-policy: all``):" -msgstr "Use HA queues in RabbitMQ (``x-ha-policy: all``):" - -msgid "" -"Use MySQL/Galera in active/passive mode to avoid deadlocks on ``SELECT ... " -"FOR UPDATE`` type queries (used, for example, by nova and neutron). This " -"issue is discussed in the following:" -msgstr "" -"Use MySQL/Galera in active/passive mode to avoid deadlocks on ``SELECT ... " -"FOR UPDATE`` type queries (used, for example, by Nova and Neutron). This " -"issue is discussed in the following:" - -msgid "Use durable queues in RabbitMQ:" -msgstr "Use durable queues in RabbitMQ:" - -msgid "" -"Use that password to authenticate to the nodes that will make up the cluster:" -msgstr "" -"Use that password to authenticate to the nodes that will make up the cluster:" - -msgid "" -"Use the :command:`corosync-cfgtool` utility with the ``-s`` option to get a " -"summary of the health of the communication rings:" -msgstr "" -"Use the :command:`corosync-cfgtool` utility with the ``-s`` option to get a " -"summary of the health of the communication rings:" - -msgid "" -"Use the :command:`corosync-objctl` utility to dump the Corosync cluster " -"member list:" -msgstr "" -"Use the :command:`corosync-objctl` utility to dump the Corosync cluster " -"member list:" - -msgid "Use these steps to configurate all services using RabbitMQ:" -msgstr "Use these steps to configure all services using RabbitMQ:" - -msgid "Value" -msgstr "Value" - -msgid "Verify that the nodes are running:" -msgstr "Verify that the nodes are running:" - -msgid "Verify the cluster status:" -msgstr "Verify the cluster status:" - -msgid "Virtualized hardware" -msgstr "Virtualised hardware" - -msgid "" -"We advise that you read this at your own discretion when planning on your " -"OpenStack cloud." -msgstr "" -"We advise that you read this at your own discretion when planning on your " -"OpenStack cloud." - -msgid "" -"We do not recommend setting the quorum to a value less than ``floor(n/2) + " -"1`` as it would likely cause a split-brain in a face of network partitions." -msgstr "" -"We do not recommend setting the quorum to a value less than ``floor(n/2) + " -"1`` as it would likely cause a split-brain in a face of network partitions." - -msgid "" -"We recommend HAProxy as the load balancer, however, there are many " -"alternative load balancing solutions in the marketplace." -msgstr "" -"We recommend HAProxy as the load balancer, however, there are many " -"alternative load balancing solutions in the marketplace." - -msgid "" -"We recommend two primary architectures for making OpenStack highly available." -msgstr "" -"We recommend two primary architectures for making OpenStack highly available." - -msgid "" -"We recommended that the maximum latency between any two controller nodes is " -"2 milliseconds. 
Although the cluster software can be tuned to operate at " -"higher latencies, some vendors insist on this value before agreeing to " -"support the installation." -msgstr "" -"We recommended that the maximum latency between any two controller nodes is " -"2 milliseconds. Although the cluster software can be tuned to operate at " -"higher latencies, some vendors insist on this value before agreeing to " -"support the installation." - -msgid "What is a cluster manager?" -msgstr "What is a cluster manager?" - -msgid "" -"When Ceph RBD is used for ephemeral volumes as well as block and image " -"storage, it supports `live migration `_ of VMs with ephemeral drives. LVM only " -"supports live migration of volume-backed VMs." -msgstr "" -"When Ceph RBD is used for ephemeral volumes as well as block and image " -"storage, it supports `live migration `_ of VMs with ephemeral drives. LVM only " -"supports live migration of volume-backed VMs." - -msgid "" -"When configuring an OpenStack environment for study or demonstration " -"purposes, it is possible to turn off the quorum checking. Production systems " -"should always run with quorum enabled." -msgstr "" -"When configuring an OpenStack environment for study or demonstration " -"purposes, it is possible to turn off the quorum checking. Production systems " -"should always run with quorum enabled." - -msgid "" -"When each cluster node starts, it checks the IP addresses given to the " -"``wsrep_cluster_address`` parameter. It then attempts to establish network " -"connectivity with a database server running there. Once it establishes a " -"connection, it attempts to join the Primary Component, requesting a state " -"transfer as needed to bring itself into sync with the cluster." -msgstr "" -"When each cluster node starts, it checks the IP addresses given to the " -"``wsrep_cluster_address`` parameter. It then attempts to establish network " -"connectivity with a database server running there. Once it establishes a " -"connection, it attempts to join the Primary Component, requesting a state " -"transfer as needed to bring itself into sync with the cluster." - -msgid "" -"When four nodes fail simultaneously, the cluster would continue to function " -"as well. But if split to partitions of three and four nodes respectively, " -"the quorum of three would have made both sides to attempt to fence the other " -"and host resources. Without fencing enabled, it would go straight to running " -"two copies of each resource." -msgstr "" -"When four nodes fail simultaneously, the cluster would continue to function " -"as well. But if split to partitions of three and four nodes respectively, " -"the quorum of three would have made both sides to attempt to fence the other " -"and host resources. Without fencing enabled, it would go straight to running " -"two copies of each resource." - -msgid "" -"When installing highly available OpenStack on VMs, be sure that your " -"hypervisor permits promiscuous mode and disables MAC address filtering on " -"the external network." -msgstr "" -"When installing highly available OpenStack on VMs, be sure that your " -"hypervisor permits promiscuous mode and disables MAC address filtering on " -"the external network." - -msgid "" -"When you finish installing and configuring the OpenStack database, you can " -"initialize the Galera Cluster." -msgstr "" -"When you finish installing and configuring the OpenStack database, you can " -"initialise the Galera Cluster." 
- -msgid "" -"When you have all cluster nodes started, log into the database client of any " -"cluster node and check the ``wsrep_cluster_size`` status variable again:" -msgstr "" -"When you have all cluster nodes started, log into the database client of any " -"cluster node and check the ``wsrep_cluster_size`` status variable again:" - -msgid "" -"When you start up a cluster (all nodes down) and set ``wait_for_all`` to 1, " -"the cluster quorum is held until all nodes are online and have joined the " -"cluster for the first time. This parameter is new in Corosync 2.0." -msgstr "" -"When you start up a cluster (all nodes down) and set ``wait_for_all`` to 1, " -"the cluster quorum is held until all nodes are online and have joined the " -"cluster for the first time. This parameter is new in Corosync 2.0." - -msgid "" -"When you use high availability, consider the hardware requirements needed " -"for your application." -msgstr "" -"When you use high availability, consider the hardware requirements needed " -"for your application." - -msgid "" -"While SYS-V init replacements like systemd can provide deterministic " -"recovery of a complex stack of services, the recovery is limited to one " -"machine and lacks the context of what is happening on other machines. This " -"context is crucial to determine the difference between a local failure, and " -"clean startup and recovery after a total site failure." -msgstr "" -"While SYS-V init replacements like systemd can provide deterministic " -"recovery of a complex stack of services, the recovery is limited to one " -"machine and lacks the context of what is happening on other machines. This " -"context is crucial to determine the difference between a local failure, and " -"clean start-up and recovery after a total site failure." - -msgid "" -"While all of the configuration parameters available to the standard MySQL, " -"MariaDB, or Percona XtraDB database servers are available in Galera Cluster, " -"there are some that you must define an outset to avoid conflict or " -"unexpected behavior." -msgstr "" -"While all of the configuration parameters available to the standard MySQL, " -"MariaDB, or Percona XtraDB database servers are available in Galera Cluster, " -"there are some that you must define an outset to avoid conflict or " -"unexpected behaviour." - -msgid "" -"While the application can still run after the failure of several instances, " -"it may not have sufficient capacity to serve the required volume of " -"requests. A cluster can automatically recover failed instances to prevent " -"additional load induced failures." -msgstr "" -"While the application can still run after the failure of several instances, " -"it may not have sufficient capacity to serve the required volume of " -"requests. A cluster can automatically recover failed instances to prevent " -"additional load induced failures." - -msgid "" -"With ``secauth`` enabled, Corosync nodes mutually authenticates using a 128-" -"byte shared secret stored in the :file:`/etc/corosync/authkey` file. This " -"can be generated with the :command:`corosync-keygen` utility. Cluster " -"communications are encrypted when using ``secauth``." -msgstr "" -"With ``secauth`` enabled, Corosync nodes mutually authenticates using a 128-" -"byte shared secret stored in the :file:`/etc/corosync/authkey` file. This " -"can be generated with the :command:`corosync-keygen` utility. Cluster " -"communications are encrypted when using ``secauth``." 
- -msgid "" -"With this in mind, some vendors are restricting Pacemaker's use to services " -"that must operate in an active/passive mode (such as ``cinder-volume``), " -"those with multiple states (for example, Galera), and those with complex " -"bootstrapping procedures (such as RabbitMQ)." -msgstr "" -"With this in mind, some vendors are restricting Pacemaker's use to services " -"that must operate in an active/passive mode (such as ``cinder-volume``), " -"those with multiple states (for example, Galera), and those with complex " -"bootstrapping procedures (such as RabbitMQ)." - -msgid "" -"Within the ``nodelist`` directive, it is possible to specify specific " -"information about the nodes in the cluster. The directive can contain only " -"the node sub-directive, which specifies every node that should be a member " -"of the membership, and where non-default options are needed. Every node must " -"have at least the ``ring0_addr`` field filled." -msgstr "" -"Within the ``nodelist`` directive, it is possible to specify specific " -"information about the nodes in the cluster. The directive can contain only " -"the node sub-directive, which specifies every node that should be a member " -"of the membership, and where non-default options are needed. Every node must " -"have at least the ``ring0_addr`` field filled." - -msgid "" -"Work is in progress on a unified approach, which combines the best aspects " -"of existing upstream solutions. More details are available on `the HA VMs " -"user story wiki `_." -msgstr "" -"Work is in progress on a unified approach, which combines the best aspects " -"of existing upstream solutions. More details are available on `the HA VMs " -"user story wiki `_." - -msgid "" -"You can achieve high availability for the OpenStack database in many " -"different ways, depending on the type of database that you want to use. " -"There are three implementations of Galera Cluster available to you:" -msgstr "" -"You can achieve high availability for the OpenStack database in many " -"different ways, depending on the type of database that you want to use. " -"There are three implementations of Galera Cluster available to you:" - -msgid "" -"You can also ensure the availability by other means, using Keepalived or " -"Pacemaker." -msgstr "" -"You can also ensure the availability by other means, using Keepalived or " -"Pacemaker." - -msgid "" -"You can have up to 16 cluster members (this is currently limited by the " -"ability of corosync to scale higher). In extreme cases, 32 and even up to 64 " -"nodes could be possible. However, this is not well tested." -msgstr "" -"You can have up to 16 cluster members (this is currently limited by the " -"ability of Corosync to scale higher). In extreme cases, 32 and even up to 64 " -"nodes could be possible. However, this is not well tested." - -msgid "" -"You can now add the Pacemaker configuration for Block Storage API resource. " -"Connect to the Pacemaker cluster with the :command:`crm configure` command " -"and add the following cluster resources:" -msgstr "" -"You can now add the Pacemaker configuration for Block Storage API resource. " -"Connect to the Pacemaker cluster with the :command:`crm configure` command " -"and add the following cluster resources:" - -msgid "" -"You can now check the ``corosync`` connectivity with one of these tools." -msgstr "" -"You can now check the ``corosync`` connectivity with one of these tools." 
- -msgid "" -"You can read more about these concerns on the `Red Hat Bugzilla `_ and there is a `psuedo " -"roadmap `_ " -"for addressing them upstream." -msgstr "" -"You can read more about these concerns on the `Red Hat Bugzilla `_ and there is a `psuedo " -"roadmap `_ " -"for addressing them upstream." - -msgid "" -"You can take periodic snap shots throughout the installation process and " -"roll back to a working configuration in the event of a problem." -msgstr "" -"You can take periodic snap shots throughout the installation process and " -"roll back to a working configuration in the event of a problem." - -msgid "You can use the `ping` command to find the latency between two servers." -msgstr "" -"You can use the `ping` command to find the latency between two servers." - -msgid "" -"You must also create the OpenStack Image API endpoint with this IP address. " -"If you are using both private and public IP addresses, create two virtual IP " -"addresses and define your endpoint. For example:" -msgstr "" -"You must also create the OpenStack Image API endpoint with this IP address. " -"If you are using both private and public IP addresses, create two virtual IP " -"addresses and define your endpoint. For example:" - -msgid "" -"You must configure NTP to properly synchronize services among nodes. We " -"recommend that you configure the controller node to reference more accurate " -"(lower stratum) servers and other nodes to reference the controller node. " -"For more information, see the `Installation Guides `_." -msgstr "" -"You must configure NTP to properly synchronise services among nodes. We " -"recommend that you configure the controller node to reference more accurate " -"(lower stratum) servers and other nodes to reference the controller node. " -"For more information, see the `Installation Guides `_." - -msgid "" -"You must configure a supported Tooz driver for the HA deployment of the " -"Telemetry services." -msgstr "" -"You must configure a supported Tooz driver for the HA deployment of the " -"Telemetry services." - -msgid "You must create the Shared File Systems API endpoint with this IP." -msgstr "You must create the Shared File Systems API endpoint with this IP." - -msgid "" -"You must select and assign a virtual IP address (VIP) that can freely float " -"between cluster nodes." -msgstr "" -"You must select and assign a virtual IP address (VIP) that can freely float " -"between cluster nodes." - -msgid "" -"You must use the same name on every cluster node. The connection fails when " -"this value does not match." -msgstr "" -"You must use the same name on every cluster node. The connection fails when " -"this value does not match." - -msgid "" -"You only need to do this on one cluster node. Galera Cluster replicates the " -"user to all the others." -msgstr "" -"You only need to do this on one cluster node. Galera Cluster replicates the " -"user to all the others." - -msgid "" -"You should see a ``status=joined`` entry for each of your constituent " -"cluster nodes." -msgstr "" -"You should see a ``status=joined`` entry for each of your constituent " -"cluster nodes." - -msgid "" -"You will need to address high availability concerns for any applications " -"software that you run on your OpenStack environment. The important thing is " -"to make sure that your services are redundant and available. How you achieve " -"that is up to you." 
-msgstr "" -"You will need to address High Availability concerns for any applications " -"software that you run on your OpenStack environment. The important thing is " -"to make sure that your services are redundant and available. How you achieve " -"that is up to you." - -msgid "" -"You would choose this option if you prefer to have fewer but more powerful " -"boxes." -msgstr "" -"You would choose this option if you prefer to have fewer but more powerful " -"boxes." - -msgid "" -"You would choose this option if you prefer to have more but less powerful " -"boxes." -msgstr "" -"You would choose this option if you prefer to have more but less powerful " -"boxes." - -msgid "" -"Your OpenStack services must now point their Block Storage API configuration " -"to the highly available, virtual cluster IP address rather than a Block " -"Storage API server’s physical IP address as you would for a non-HA " -"environment." -msgstr "" -"Your OpenStack services must now point their Block Storage API configuration " -"to the highly available, virtual cluster IP address rather than a Block " -"Storage API server’s physical IP address as you would for a non-HA " -"environment." - -msgid "" -"Your OpenStack services must now point their OpenStack Image API " -"configuration to the highly available, virtual cluster IP address instead of " -"pointing to the physical IP address of an OpenStack Image API server as you " -"would in a non-HA cluster." -msgstr "" -"Your OpenStack services must now point their OpenStack Image API " -"configuration to the highly available, virtual cluster IP address instead of " -"pointing to the physical IP address of an OpenStack Image API server as you " -"would in a non-HA cluster." - -msgid "" -"Your OpenStack services must now point their Shared File Systems API " -"configuration to the highly available, virtual cluster IP address rather " -"than a Shared File Systems API server’s physical IP address as you would for " -"a non-HA environment." -msgstr "" -"Your OpenStack services must now point their Shared File Systems API " -"configuration to the highly available, virtual cluster IP address rather " -"than a Shared File Systems API server’s physical IP address as you would for " -"a non-HA environment." - -msgid "" -"Your OpenStack services now point their OpenStack Identity configuration to " -"the highly available virtual cluster IP address." -msgstr "" -"Your OpenStack services now point their OpenStack Identity configuration to " -"the highly available virtual cluster IP address." - -msgid "[TODO: need more discussion of these parameters]" -msgstr "[TODO: need more discussion of these parameters]" - -msgid "" -"`Ceph RBD `_ is an innately high availability storage " -"back end. It creates a storage cluster with multiple nodes that communicate " -"with each other to replicate and redistribute data dynamically. A Ceph RBD " -"storage cluster provides a single shared set of storage nodes that can " -"handle all classes of persistent and ephemeral data (glance, cinder, and " -"nova) that are required for OpenStack instances." -msgstr "" -"`Ceph RBD `_ is an innately High Availability storage " -"back end. It creates a storage cluster with multiple nodes that communicate " -"with each other to replicate and redistribute data dynamically. A Ceph RBD " -"storage cluster provides a single shared set of storage nodes that can " -"handle all classes of persistent and ephemeral data (Glance, Cinder, and " -"Nova) that are required for OpenStack instances." 
- -msgid "`Clustering Guide `_" -msgstr "`Clustering Guide `_" - -msgid "`Debian and Ubuntu `_" -msgstr "`Debian and Ubuntu `_" - -msgid "" -"`Galera Cluster for MySQL `_: The MySQL reference " -"implementation from Codership, Oy." -msgstr "" -"`Galera Cluster for MySQL `_: The MySQL reference " -"implementation from Codership, Oy." - -msgid "`Highly Available Queues `_" -msgstr "`Highly Available Queues `_" - -msgid "" -"`IMPORTANT: MySQL Galera does *not* support SELECT ... FOR UPDATE `_" -msgstr "" -"`IMPORTANT: MySQL Galera does *not* support SELECT ... FOR UPDATE `_" - -msgid "" -"`MariaDB Galera Cluster `_: The MariaDB implementation " -"of Galera Cluster, which is commonly supported in environments based on Red " -"Hat distributions." -msgstr "" -"`MariaDB Galera Cluster `_: The MariaDB implementation " -"of Galera Cluster, which is commonly supported in environments based on Red " -"Hat distributions." - -msgid "`Memcached `_:" -msgstr "`Memcached `_:" - -msgid "" -"`OCF RAs `_, as used by Red Hat and SUSE" -msgstr "" -"`OCF RAs `_, as used by Red Hat and SUSE" - -msgid "" -"`Pacemaker `_ cluster stack is a state-of-the-art " -"high availability and load balancing stack for the Linux platform. Pacemaker " -"is used to make OpenStack infrastructure highly available." -msgstr "" -"`Pacemaker `_ cluster stack is a state-of-the-art " -"high availability and load balancing stack for the Linux platform. Pacemaker " -"is used to make OpenStack infrastructure highly available." - -msgid "" -"`Percona XtraDB Cluster `_: The XtraDB " -"implementation of Galera Cluster from Percona." -msgstr "" -"`Percona XtraDB Cluster `_: The XtraDB " -"implementation of Galera Cluster from Percona." - -msgid "" -"`RPM based `_ (RHEL, Fedora, " -"CentOS, openSUSE)" -msgstr "" -"`RPM based `_ (RHEL, Fedora, " -"CentOS, openSUSE)" - -msgid "`Redis `_:" -msgstr "`Redis `_:" - -msgid "" -"`Understanding reservations, concurrency, and locking in Nova `_" -msgstr "" -"`Understanding reservations, concurrency, and locking in Nova `_" - -msgid "`Zookeeper `_:" -msgstr "`Zookeeper `_:" - -msgid "``crmsh``" -msgstr "``crmsh``" - -msgid "" -"``last_man_standing_window`` specifies the time, in milliseconds, required " -"to recalculate quorum after one or more hosts have been lost from the " -"cluster. To perform a new quorum recalculation, the cluster must have quorum " -"for at least the interval specified for ``last_man_standing_window``. The " -"default is 10000ms." -msgstr "" -"``last_man_standing_window`` specifies the time, in milliseconds, required " -"to recalculate quorum after one or more hosts have been lost from the " -"cluster. To perform a new quorum recalculation, the cluster must have quorum " -"for at least the interval specified for ``last_man_standing_window``. The " -"default is 10000ms." - -msgid "" -"``nodeid`` is optional when using IPv4 and required when using IPv6. This is " -"a 32-bit value specifying the node identifier delivered to the cluster " -"membership service. If this is not specified with IPv4, the node ID is " -"determined from the 32-bit IP address of the system to which the system is " -"bound with ring identifier of 0. The node identifier value of zero is " -"reserved and should not be used." -msgstr "" -"``nodeid`` is optional when using IPv4 and required when using IPv6. This is " -"a 32-bit value specifying the node identifier delivered to the cluster " -"membership service. 
If this is not specified with IPv4, the node ID is " -"determined from the 32-bit IP address of the system to which the system is " -"bound with ring identifier of 0. The node identifier value of zero is " -"reserved and should not be used." - -msgid "``pcs``" -msgstr "``pcs``" - -msgid "" -"``ring{X}_addr`` specifies the IP address of one of the nodes. ``{X}`` is " -"the ring number." -msgstr "" -"``ring{X}_addr`` specifies the IP address of one of the nodes. ``{X}`` is " -"the ring number." - -msgid "" -"`a mistral-based auto-recovery workflow `_, by Intel" -msgstr "" -"`a mistral-based auto-recovery workflow `_, by Intel" - -msgid "`corosync`" -msgstr "`corosync`" - -msgid "`fence-agents` (CentOS or RHEL) or cluster-glue" -msgstr "`fence-agents` (CentOS or RHEL) or cluster-glue" - -msgid "`libqb0`" -msgstr "`libqb0`" - -msgid "`masakari `_, by NTT" -msgstr "`masakari `_, by NTT" - -msgid "`pacemaker`" -msgstr "`pacemaker`" - -msgid "`pcs` (CentOS or RHEL) or crmsh" -msgstr "`pcs` (CentOS or RHEL) or crmsh" - -msgid "`resource-agents`" -msgstr "`resource-agents`" - -msgid "allow_automatic_l3agent_failover" -msgstr "allow_automatic_l3agent_failover" - -msgid "compute node" -msgstr "compute node" - -msgid "controller node" -msgstr "controller node" - -msgid "l3_ha" -msgstr "l3_ha" - -msgid "max_l3_agents_per_router" -msgstr "max_l3_agents_per_router" - -msgid "min_l3_agents_per_router" -msgstr "min_l3_agents_per_router" diff --git a/doc/ha-guide/source/locale/ja/LC_MESSAGES/ha-guide.po b/doc/ha-guide/source/locale/ja/LC_MESSAGES/ha-guide.po deleted file mode 100644 index 958b955c46..0000000000 --- a/doc/ha-guide/source/locale/ja/LC_MESSAGES/ha-guide.po +++ /dev/null @@ -1,3837 +0,0 @@ -# Akihiro Motoki , 2016. #zanata -# KATO Tomoyuki , 2016. #zanata -# Yuta Hono , 2016. #zanata -# KATO Tomoyuki , 2017. #zanata -msgid "" -msgstr "" -"Project-Id-Version: openstackhaguide\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2018-08-22 22:08+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2017-03-22 02:26+0000\n" -"Last-Translator: KATO Tomoyuki \n" -"Language-Team: Japanese\n" -"Language: ja\n" -"X-Generator: Zanata 4.3.3\n" -"Plural-Forms: nplurals=1; plural=0\n" - -msgid "**Cluster Address**: List the IP addresses for each cluster node." -msgstr "**クラスターアドレス**: 各クラスターノードの IP アドレスを表示します。" - -msgid "**Cluster Name**: Define an arbitrary name for your cluster." -msgstr "**クラスター名**: 任意のクラスターの名前を定義します。" - -msgid "" -"**Corosync configuration file fragment for unicast (``corosync.conf``)**" -msgstr "**ユニキャスト向け Corosync 設定ファイルの断片 (``corosync.conf``)**" - -msgid "" -"**Example Corosync configuration file for multicast (``corosync.conf``)**" -msgstr "**マルチキャスト用の Corosync 設定ファイル例 (``corosync.conf``)**" - -msgid "**Node Address**: Define the IP address of the cluster node." -msgstr "**ノードアドレス**: クラスターノードの IP アドレスを定義します。" - -msgid "**Node Name**: Define the logical name of the cluster node." -msgstr "**ノード名**: クラスターノードの論理名を定義します。" - -msgid "" -"**wsrep Provider**: The Galera Replication Plugin serves as the ``wsrep`` " -"provider for Galera Cluster. It is installed on your system as the " -"``libgalera_smm.so`` file. Define the path to this file in your ``my.cnf``:" -msgstr "" -"**wsrep Provider**: Galera Replication Plugin は、Galera Cluster の " -"``wsrep`` プロバイダーとして動作します。お使いのシステムに ``libgalera_smm." 
-"so`` ファイルとしてインストールされます。このファイルへのパスを ``my.cnf`` に" -"定義します。" - -msgid "/etc/neutron/neutron.conf parameters for high availability" -msgstr "高可用性のための /etc/neutron/neutron.conf のパラメーター" - -msgid "12 GB" -msgstr "12 GB" - -msgid "12+ GB" -msgstr "12+ GB" - -msgid "120 GB" -msgstr "120 GB" - -msgid "120+ GB" -msgstr "120+ GB" - -msgid "2" -msgstr "2" - -msgid "2 or more" -msgstr "2 以上" - -msgid "4" -msgstr "4" - -msgid "8+" -msgstr "8+" - -msgid ":doc:`Networking DHCP agent`" -msgstr ":doc:`Networking DHCP エージェント `" - -msgid ":doc:`Neutron L3 agent`" -msgstr ":doc:`Networking L3 エージェント `" - -msgid "" -":ref:`Configure OpenStack services to use RabbitMQ HA queues `" -msgstr "" -":ref:`Configure OpenStack services to use RabbitMQ HA queues `" - -msgid ":ref:`Configure RabbitMQ for HA queues`" -msgstr ":ref:`高可用性 キュー用の RabbitMQ の設定 `" - -msgid ":ref:`Install RabbitMQ`" -msgstr ":ref:`RabbitMQ のインストール`" - -msgid ":ref:`corosync-multicast`" -msgstr ":ref:`corosync-multicast`" - -msgid ":ref:`corosync-unicast`" -msgstr ":ref:`corosync-unicast`" - -msgid ":ref:`corosync-votequorum`" -msgstr ":ref:`corosync-votequorum`" - -msgid ":ref:`glance-api-configure`" -msgstr ":ref:`glance-api-configure`" - -msgid ":ref:`glance-api-pacemaker`" -msgstr ":ref:`glance-api-pacemaker`" - -msgid ":ref:`glance-services`" -msgstr ":ref:`glance-services`" - -msgid ":ref:`ha-blockstorage-configure`" -msgstr ":ref:`ha-blockstorage-configure`" - -msgid ":ref:`ha-blockstorage-pacemaker`" -msgstr ":ref:`ha-blockstorage-pacemaker`" - -msgid ":ref:`ha-blockstorage-services`" -msgstr ":ref:`ha-blockstorage-services`" - -msgid ":ref:`ha-sharedfilesystems-configure`" -msgstr ":ref:`ha-sharedfilesystems-configure`" - -msgid ":ref:`ha-sharedfilesystems-pacemaker`" -msgstr ":ref:`ha-sharedfilesystems-pacemaker`" - -msgid ":ref:`ha-sharedfilesystems-services`" -msgstr ":ref:`ha-sharedfilesystems-services`" - -msgid ":ref:`identity-config-identity`" -msgstr ":ref:`identity-config-identity`" - -msgid ":ref:`identity-pacemaker`" -msgstr ":ref:`identity-pacemaker`" - -msgid ":ref:`identity-services-config`" -msgstr ":ref:`identity-services-config`" - -msgid ":ref:`pacemaker-cluster-properties`" -msgstr ":ref:`pacemaker-cluster-properties`" - -msgid ":ref:`pacemaker-corosync-setup`" -msgstr ":ref:`pacemaker-corosync-setup`" - -msgid ":ref:`pacemaker-corosync-start`" -msgstr ":ref:`pacemaker-corosync-start`" - -msgid ":ref:`pacemaker-install`" -msgstr ":ref:`pacemaker-install`" - -msgid ":ref:`pacemaker-start`" -msgstr ":ref:`pacemaker-start`" - -msgid "" -":term:`Advanced Message Queuing Protocol (AMQP)` provides OpenStack internal " -"stateful communication service." -msgstr "" -":term:`Advanced Message Queuing Protocol (AMQP)` は、OpenStack 内部のステート" -"フルな通信サービスを提供します。" - -msgid ":term:`active/active configuration`" -msgstr ":term:`アクティブ/アクティブ設定 `" - -msgid ":term:`active/passive configuration`" -msgstr ":term:`アクティブ/パッシブ設定 `" - -msgid "" -"A crucial aspect of high availability is the elimination of single points of " -"failure (SPOFs). A SPOF is an individual piece of equipment or software that " -"causes system downtime or data loss if it fails. 
In order to eliminate " -"SPOFs, check that mechanisms exist for redundancy of:" -msgstr "" -"高可用性の重要な側面は、単一障害点 (SPOF) を減らすことです。SPOF は、障害が発" -"生した場合にシステム停止やデータ損失を引き起こす、設備やソフトウェアの個々の" -"部品です。SPOF を削減するために、以下の冗長性に対するメカニズムを確認します。" - -msgid "A minimum of three hosts" -msgstr "最小 3 ノード" - -msgid "" -"A sample votequorum service configuration in the :file:`corosync.conf` file " -"is:" -msgstr ":file:`corosync.conf` ファイルの votequorum サービス設定例:" - -msgid "" -"A service that provides a response after your request and then requires no " -"further attention. To make a stateless service highly available, you need to " -"provide redundant instances and load balance them. OpenStack services that " -"are stateless include ``nova-api``, ``nova-conductor``, ``glance-api``, " -"``keystone-api``, ``neutron-api``, and ``nova-scheduler``." -msgstr "" -"リクエストに応答して、その後さらなる注意を必要としないサービス。ステートレス" -"なサービスを高可用化するために、複数のインスタンスを配備して、負荷分散する必" -"要があります。ステートレスな OpenStack サービスに ``nova-api``、``nova-" -"conductor``、``glance-api``、``keystone-api``、``neutron-api``、``nova-" -"scheduler`` があります。" - -msgid "" -"A service where subsequent requests to the service depend on the results of " -"the first request. Stateful services are more difficult to manage because a " -"single action typically involves more than one request. Providing additional " -"instances and load balancing does not solve the problem. For example, if the " -"horizon user interface reset itself every time you went to a new page, it " -"would not be very useful. OpenStack services that are stateful include the " -"OpenStack database and message queue. Making stateful services highly " -"available can depend on whether you choose an active/passive or active/" -"active configuration." -msgstr "" -"最初のリクエストの結果に応じて、後続のリクエストがあるサービス。ステートフル" -"サービスは、あるアクションが一般的に複数のリクエストに影響するため、管理する" -"ことが難しいです。追加インスタンスを配備して負荷分散するだけでは、問題を解決" -"できません。例えば、horizon ユーザーインターフェースが、新しいページを開くた" -"びに毎回リセットされると、ほとんど役に立たないでしょう。ステートフルな " -"OpenStack サービスには、OpenStack のデータベース、メッセージキューがありま" -"す。ステートレスなサービスの高可用化には、アクティブ/パッシブまたはアクティ" -"ブ/アクティブな設定のどちらを選択するかに依存する可能性があります。" - -msgid "" -"A shared implementation and calculation of `quorum `_" -msgstr "" -"`クォーラム `_ の" -"共有実装と計算" - -msgid "" -"A single application does not have sufficient context to know the difference " -"between failure of a machine and failure of the application on a machine. " -"The usual practice is to assume the machine is dead and continue working, " -"however this is highly risky. A rogue process or machine could still be " -"responding to requests and generally causing havoc. The safer approach is to " -"make use of remotely accessible power switches and/or network switches and " -"SAN controllers to fence (isolate) the machine before continuing." -msgstr "" -"単一アプリケーションは、マシンの障害とマシン上のアプリケーション障害の違いを" -"十分に理解できません。一般的なプラクティスは、マシンが停止したと仮定して、動" -"作し続けることです。しかしながら、これは非常にリスクがあります。はぐれたプロ" -"セスやマシンがリクエストに応答し続け、一般的に大破壊を引き起こし続ける可能性" -"があります。より安全なアプローチは、継続する前にマシンをフェンス (隔離) する" -"ために、リモートアクセス可能な電源スイッチ、ネットワークスイッチ、SAN コント" -"ローラーを使用することです。" - -msgid "" -"A typical active/active installation for a stateful service includes " -"redundant services, with all instances having an identical state. In other " -"words, updates to one instance of a database update all other instances. " -"This way a request to one instance is the same as a request to any other. A " -"load balancer manages the traffic to these systems, ensuring that " -"operational systems always handle the request." 
-msgstr "" -"一般的にステートレスサービスをアクティブ / アクティブにインストールすること" -"は、すべてのインスタンスが同じ状態を持つ冗長なサービスになることを含みます。" -"別の言い方をすると、あるインスタンスのデータベースの更新は、他のすべてのイン" -"スタンスも更新されます。このように、あるインスタンスへのリクエストは、他への" -"リクエストと同じです。ロードバランサーがこれらのシステムのトラフィックを管理" -"し、利用可能なシステムが常にリクエストを確実に処理します。" - -msgid "" -"A typical active/passive installation for a stateful service maintains a " -"replacement resource that can be brought online when required. Requests are " -"handled using a :term:`virtual IP address (VIP)` that facilitates returning " -"to service with minimal reconfiguration. A separate application (such as " -"Pacemaker or Corosync) monitors these services, bringing the backup online " -"as necessary." -msgstr "" -"一般的にステートレスサービスをアクティブ / パッシブにインストールすると、必要" -"に応じてオンラインにできる置換リソースを維持します。リクエストは、サービスの" -"最小限の再設定により返す機能を持つ :term:`仮想 IP アドレス ` を使用して処理されます。 独立したアプリケーション (Pacemaker " -"や Corosync など) がこれらのサービスを監視し、必要に応じてバックアップ側をオ" -"ンラインにします。" - -msgid "API isolation" -msgstr "API 分離" - -msgid "Abstract" -msgstr "概要" - -msgid "" -"Access to Memcached is not handled by HAProxy because replicated access is " -"currently in an experimental state. Instead, OpenStack services must be " -"supplied with the full list of hosts running Memcached." -msgstr "" -"重複アクセスは現在実験的な位置づけのため、Memcached へのアクセスは HAproxy を" -"利用しません。代わりに、OpenStack のサービスは Memcached を実行しているホスト" -"をすべて指定する必要があります。" - -msgid "Active/passive versus active/active" -msgstr "アクティブ/パッシブとアクティブ/アクティブ" - -msgid "Add Block Storage API resource to Pacemaker" -msgstr "Block Storage API リソースの Pacemaker への追加" - -msgid "" -"Add HAProxy to the cluster and ensure the VIPs can only run on machines " -"where HAProxy is active:" -msgstr "" -"HAProxy をクラスターに追加して、仮想 IP が HAProxy の動作しているマシンにおい" -"てのみ動作できることを確認します。" - -msgid "Add OpenStack Identity resource to Pacemaker" -msgstr "OpenStack Identity リソースの Pacemaker への追加" - -msgid "Add OpenStack Image API resource to Pacemaker" -msgstr "OpenStack Image API リソースの Pacemaker への追加" - -msgid "Add Shared File Systems API resource to Pacemaker" -msgstr "Shared File Systems API リソースの Pacemaker への追加" - -msgid "" -"Add the Pacemaker configuration for the OpenStack Identity resource by " -"running the following command to connect to the Pacemaker cluster:" -msgstr "" -"ここで以下のコマンドを使用して、Pacemaker クラスターに接続することにより、" -"OpenStack Identity リソース向けに Pacemaker の設定を追加します。" - -msgid "" -"Add the Pacemaker configuration for the OpenStack Image API resource. Use " -"the following command to connect to the Pacemaker cluster:" -msgstr "" -"ここで OpenStack Image API リソース向けに Pacemaker の設定を追加します。以下" -"のコマンドを使用して、Pacemaker クラスターに接続します。" - -msgid "" -"Add the Pacemaker configuration for the Shared File Systems API resource. " -"Connect to the Pacemaker cluster with the following command:" -msgstr "" -"Shared File Systems API リソース用の Pacemaker 設定を追加します。以下のコマン" -"ドを使用して Pacemaker クラスターに接続します。" - -msgid "Add the following cluster resources:" -msgstr "以下のクラスターリソースを追加します。" - -msgid "Additional parameters" -msgstr "追加パラメーター" - -msgid "" -"After installing the Corosync package, you must create the :file:`/etc/" -"corosync/corosync.conf` configuration file." -msgstr "" -"Corosync パッケージのインストール後、 :file:`/etc/corosync/corosync.conf` 設" -"定ファイルを作成する必要があります。" - -msgid "" -"After the ``corosync`` service have been started and you have verified that " -"the cluster is communicating properly, you can start :command:`pacemakerd`, " -"the Pacemaker master control process. 
Choose one from the following four " -"ways to start it:" -msgstr "" -"``corosync`` サービスが起動して、クラスターが正常に通信していることを確認した" -"後、Pacemaker のマスター制御プロセス :command:`pacemakerd` を起動できます。以" -"下の 4 通りの方法からどれかを選択してください。" - -msgid "" -"After the ``pacemaker`` service has started, Pacemaker creates a default " -"empty cluster configuration with no resources. Use the :command:`crm_mon` " -"utility to observe the status of ``pacemaker``:" -msgstr "" -"``pacemaker`` サービスの起動後、Pacemaker がリソースを持たないデフォルトの空" -"クラスターを作成します。 :command:`crm_mon` ユーティリティーを使用して、" -"``pacemaker`` の状態を確認します。" - -msgid "After you make these changes, commit the updated configuration." -msgstr "これらの変更実行後、更新した設定を反映します。" - -msgid "" -"After you set up your Pacemaker cluster, set a few basic cluster properties:" -msgstr "" -"Pacemaker クラスターのセットアップ後、いくつかの基本的なクラスターのプロパ" -"ティーを設定します。" - -msgid "All routers are highly available by default." -msgstr "すべてのルーターは、デフォルトで高可用性になっています。" - -msgid "" -"Almost all services in this stack benefit from being proxied. Using a proxy " -"server provides the following capabilities:" -msgstr "" -"このスタックのほぼすべてのサービスは、プロキシーする恩恵を受けられます。プロ" -"キシーサーバを使用することにより、以下の機能が提供されます。" - -msgid "" -"Alternatively, if the database server is running, use the " -"``wsrep_last_committed`` status variable:" -msgstr "" -"代わりに、データベースサーバーが動作している場合、 ``wsrep_last_committed`` " -"状態変数を使用します。" - -msgid "" -"Alternatively, instead of using systemd agents, download and install the OCF " -"resource agent:" -msgstr "" -"または、systemd エージェントを使用する代わりに、OCF リソースエージェントをダ" -"ウンロードしてインストールします。" - -msgid "" -"Alternatively, make modifications using the ``firewall-cmd`` utility for " -"FirewallD that is available on many Linux distributions:" -msgstr "" -"代わりに、多くの Linux ディストリビューションにおいて利用できる FirewallD 向" -"けの ``firewall-cmd`` ユーティリティーを使用して変更することもできます。" - -msgid "" -"Alternatively, you can use a commercial load balancer, which is hardware or " -"software. We recommend a hardware load balancer as it generally has good " -"performance." -msgstr "" -"代わりに、ハードウェアやソフトウェアの商用ロードバランサーを使用することもで" -"きます。ハードウェアロードバランサーは一般的に高性能なので、推奨されます。" - -msgid "Alternatively:" -msgstr "他の" - -msgid "" -"An AMQP (Advanced Message Queuing Protocol) compliant message bus is " -"required for most OpenStack components in order to coordinate the execution " -"of jobs entered into the system." -msgstr "" -"AMQP (Advanced Message Queuing Protocol) 互換メッセージバスが、システム内の" -"ジョブ実行を調整するために、ほとんどの OpenStack コンポーネントに必要となりま" -"す。" - -msgid "An OpenStack environment includes multiple data pools for the VMs:" -msgstr "OpenStack 環境は、仮想マシン向けの複数のデータプールがあります。" - -msgid "" -"And the quorum could also have been set to three, just as a configuration " -"example." -msgstr "また、クォーラムが、設定例にあるように 3 つに設定されているでしょう。" - -msgid "AppArmor" -msgstr "AppArmor" - -msgid "AppArmor now permits Galera Cluster to operate." -msgstr "AppArmor により Galera Cluster の動作を許可されます。" - -msgid "Appendix" -msgstr "付録" - -msgid "" -"Application Armor is a kernel module for improving security on Linux " -"operating systems. It is developed by Canonical and commonly used on Ubuntu-" -"based distributions. In the context of Galera Cluster, systems with AppArmor " -"may block the database service from operating normally." 
-msgstr "" -"Application Armor は、Linux オペレーティングシステムにおいてセキュリティーを" -"向上するためのカーネルモジュールです。Canonical により開発され、一般的に " -"Ubuntu 系のディストリビューションにおいて使用されています。Galera Cluster の" -"観点では、AppArmor を有効化したシステムは、データベースサービスが正常に動作す" -"ることを妨げる可能性があります。" - -msgid "Applications and automatic service migration" -msgstr "アプリケーションおよびサービスの自動的なマイグレーション" - -msgid "" -"As another option to make RabbitMQ highly available, RabbitMQ contains the " -"OCF scripts for the Pacemaker cluster resource agents since version 3.5.7. " -"It provides the active/active RabbitMQ cluster with mirrored queues. For " -"more information, see `Auto-configuration of a cluster with a Pacemaker " -"`_." -msgstr "" -"RabbitMQ を高可用化する別の選択肢として、RabbitMQ バージョン 3.5.7 以降、" -"Pacemaker クラスターリソースエージェント向けの OCF スクリプトが含まれます。ア" -"クティブ/アクティブ RabbitMQ クラスターにミラーキューを提供します。詳細は " -"`Auto-configuration of a cluster with a Pacemaker `_ を参照してください。" - -msgid "" -"As of September 2016, the OpenStack High Availability community is designing " -"and developing an official and unified way to provide high availability for " -"instances. We are developing automatic recovery from failures of hardware or " -"hypervisor-related software on the compute node, or other failures that " -"could prevent instances from functioning correctly, such as, issues with a " -"cinder volume I/O path." -msgstr "" -"2016年9月時点、OpenStack High Availability コミュニティーは、インスタンスの高" -"可用性を提供するために、公式な統一された方法を設定および開発しています。ハー" -"ドウェアやハイパーバイザー関連ソフトウェアの障害、cinder ボリュームの I/O パ" -"スに関する問題のように、インスタンスが正常に動作しないような他の障害から自動" -"的に復旧する方法を開発しています。" - -msgid "" -"At its core, a cluster is a distributed finite state machine capable of co-" -"ordinating the startup and recovery of inter-related services across a set " -"of machines." -msgstr "" -"クラスターは、その中心において、複数のセットのマシン間で関連するサービスのス" -"タートアップとリカバリーを調整する機能を持つ、分散有限状態マシンです。" - -msgid "Automated recovery of failed instances" -msgstr "障害インスタンスの自動復旧" - -msgid "Awareness of instances on other machines" -msgstr "他のマシンにあるインスタンスの把握" - -msgid "Awareness of other applications in the stack" -msgstr "スタックにある他のアプリケーションの認識" - -msgid "" -"Bear in mind, leaving SELinux in permissive mode is not a good security " -"practice. Over the longer term, you need to develop a security policy for " -"Galera Cluster and then switch SELinux back into enforcing mode." -msgstr "" -"SELinux を permissive モードにすることは、良いセキュリティー慣行ではないこと" -"を覚えておいてください。長い間、Galera Cluster のセキュリティーポリシーを開発" -"して、SELinux を enforcing モードに切り替える必要があります。" - -msgid "" -"Before beginning, ensure you have read the `OpenStack Identity service " -"getting started documentation `_." -msgstr "" -"進める前に `OpenStack Identity サービスの概要 `_ をきちんと読んでください。" - -msgid "" -"Before following this guide to configure the highly available OpenStack " -"cluster, ensure the IP ``10.0.0.11`` and hostname ``controller`` are not in " -"use." -msgstr "" -"このガイドを読み進める前に、高可用性 OpenStack クラスターが IP アドレス " -"``10.0.0.11`` とホスト名 ``controller`` を使わないよう設定してください。" - -msgid "" -"Before you launch Galera Cluster, you need to configure the server and the " -"database to operate as part of the cluster." -msgstr "" -"Galera クラスターを起動する前に、クラスターの一部として動作するよう、サーバー" -"とデータベースを設定する必要があります。" - -msgid "" -"Both the central and the compute agent can run in an HA deployment. This " -"means that multiple instances of these services can run in parallel with " -"workload partitioning among these running instances." 
-msgstr "" -"中央エージェントとコンピュートエージェントの両方は、高可用性で動作できます。" -"これらのサービスの複数のインスタンスが、これらを実行しているインスタンス間で" -"並行して負荷分散できることを意味します。" - -msgid "" -"Both use a cluster manager, such as Pacemaker or Veritas, to orchestrate the " -"actions of the various services across a set of machines. Because we are " -"focused on FOSS, we refer to these as Pacemaker architectures." -msgstr "" -"どちらも Pacemaker や Veritas のようなクラスターマネージャーを使用して、複数" -"のマシンにまたがるさまざまなサービスの動作を協調させます。私たちは FOSS に注" -"力しているため、Pacemaker のアーキテクチャーを参照します。" - -msgid "" -"By default, STONITH is enabled in Pacemaker, but STONITH mechanisms (to " -"shutdown a node via IPMI or ssh) are not configured. In this case Pacemaker " -"will refuse to start any resources. For production cluster it is recommended " -"to configure appropriate STONITH mechanisms. But for demo or testing " -"purposes STONITH can be disabled completely as follows:" -msgstr "" -"デフォルトでは、STONITH は Pacemaker で有効化されていますが、Pacemaker メカニ" -"ズム (IPMI や SSH 経由のノードのシャットダウン) は設定されていません。この場" -"合、Pacemaker はリソースの開始をすべて拒否します。本番環境のクラスターは、適" -"切な STONITH メカニズムを設定することが推奨されます。デモ目的やテスト目的の場" -"合、STONITH は以下のとおり完全に無効化できます。" - -msgid "" -"By default, ``controller1`` handles the caching service. If the host goes " -"down, ``controller2`` or ``controller3`` will complete the service." -msgstr "" -"デフォルトで ``controller1`` がキャッシュサービスを処理します。そのホストが停" -"止している場合、 ``controller2`` または ``controller3`` がサービスを実施しま" -"す。" - -msgid "" -"By default, cluster nodes do not start as part of a Primary Component. In " -"the Primary Component, replication and state transfers bring all databases " -"to the same state." -msgstr "" -"クラスターノードは、デフォルトで Primary Component の一部として起動しません。" -"Primary Component において、レプリケーションと状態転送により、すべてのデータ" -"ベースが同じ状態になります。" - -msgid "" -"By sending all API access through the proxy, you can clearly identify " -"service interdependencies. You can also move them to locations other than " -"``localhost`` to increase capacity if the need arises." -msgstr "" -"すべての API アクセスをプロキシー経由で送信することにより、サービスの相互依存" -"関係を明確に識別できます。キャパシティーを必要に応じて増やすために、それらを " -"``localhost`` から別の場所に移動できます。" - -msgid "Ceph" -msgstr "Ceph" - -msgid "" -"Ceph RBD provides object replication capabilities by storing Block Storage " -"volumes as Ceph RBD objects. Ceph RBD ensures that each replica of an object " -"is stored on a different node. This means that your volumes are protected " -"against hard drive and node failures, or even the failure of the data center " -"itself." -msgstr "" -"Ceph RBD は、Ceph RBD オブジェクトとして Block Storage のボリュームを保存する" -"ことにより、オブジェクトレプリケーション機能を提供します。オブジェクトの各レ" -"プリカが別々のノードに保存されることを保証します。このことは、お使いのボ" -"リュームがハードディスクやノードの障害時、データセンター自体の障害時にも保護" -"されることを意味します。" - -msgid "" -"Certain services running on the underlying operating system of your " -"OpenStack database may block Galera Cluster from normal operation or prevent " -"``mysqld`` from achieving network connectivity with the cluster." -msgstr "" -"OpenStack データベースのベースとなるオペレーティングシステムで動作している特" -"定のサービスは、Galera Cluster が通常の動作をブロックしたり、``mysqld`` がク" -"ラスターとのネットワーク接続を妨害したりする可能性があります。" - -msgid "Change the number of expected votes for a cluster to be quorate" -msgstr "クラスターが定数になるために期待されるボート数を変更します" - -msgid "Change the number of votes assigned to a node" -msgstr "ノードに割り当てられたボート数を変更します" - -msgid "" -"Cinder provides Block-Storage-as-a-Service suitable for performance " -"sensitive scenarios such as databases, expandable file systems, or providing " -"a server with access to raw block level storage." 
-msgstr "" -"Cinder は、データベースなどの性能を必要とするシナリオ、拡張可能なファイルシス" -"テム、ローブロックレベルストレージにアクセスするサーバーに適するサービスとし" -"て Block-Storage-as-a-Service を提供します。" - -msgid "Clusters and quorums" -msgstr "クラスターとクォーラム" - -msgid "" -"Clusters with an even number of hosts suffer from similar issues. A single " -"network failure could easily cause a N:N split where neither side retains a " -"majority. For this reason, we recommend an odd number of cluster members " -"when scaling up." -msgstr "" -"偶数のホストを持つクラスターは、同じような問題に苦しみます。単一のネットワー" -"ク障害により、どちらの側も多数派になれない N:N 分断を簡単に引き起こす可能性が" -"あります。この理由により、スケールアップするとき、奇数個のクラスターメンバー" -"を推奨します。" - -msgid "Collapsed" -msgstr "Collapsed" - -msgid "" -"Commit your configuration changes by entering the following command from " -"the :command:`crm configure` menu:" -msgstr "" -":command:`crm configure` メニューから以下のコマンドを実行して、設定の変更を反" -"映します。" - -msgid "" -"Commit your configuration changes from the :command:`crm configure` menu " -"with the following command:" -msgstr "" -":command:`crm configure` メニューから以下のコマンドを入力して、設定の変更をコ" -"ミットします。" - -msgid "Common deployment architectures" -msgstr "一般的な配備のアーキテクチャー" - -msgid "Configuration" -msgstr "設定" - -msgid "Configuration tips" -msgstr "設定のヒント" - -msgid "Configure Block Storage API service" -msgstr "Block Storage API サービスの設定" - -msgid "Configure NTP" -msgstr "NTP の設定" - -msgid "Configure OpenStack Identity service" -msgstr "OpenStack Identity Service の設定" - -msgid "Configure OpenStack Image service API" -msgstr "OpenStack Image サービス API の設定" - -msgid "Configure OpenStack services to use HA Shared File Systems API" -msgstr "" -"高可用性 Shared File Systems API を使用するための OpenStack サービスの設定" - -msgid "Configure OpenStack services to use Rabbit HA queues" -msgstr "RabbitMQ HA キューを使用するための OpenStack サービスの設定" - -msgid "" -"Configure OpenStack services to use the highly available Block Storage API" -msgstr "高可用性 Block Storage API を使用するための OpenStack サービスの設定" - -msgid "" -"Configure OpenStack services to use the highly available OpenStack Identity" -msgstr "高可用性 OpenStack Identity を使用するための OpenStack サービスの設定" - -msgid "" -"Configure OpenStack services to use the highly available OpenStack Image API" -msgstr "" -"高可用性 OpenStack Image Service API を使用するための OpenStack サービスの設" -"定" - -msgid "Configure RabbitMQ for HA queues" -msgstr "高可用性 キュー用の RabbitMQ の設定" - -msgid "Configure Shared File Systems API service" -msgstr "Shared File Systems API サービスの設定" - -msgid "Configure the OpenStack components to use at least two RabbitMQ nodes." -msgstr "" -"2 つ以上の RabbitMQ ノードを使用するよう、OpenStack のコンポーネントを設定し" -"ます。" - -msgid "Configure the VIP" -msgstr "仮想 IP の設定" - -msgid "" -"Configure the kernel parameter to allow non-local IP binding. This allows " -"running HAProxy instances to bind to a VIP for failover. Add following line " -"to ``/etc/sysctl.conf``:" -msgstr "" -"ローカル IP 以外のバインドを許可するために、カーネルパラメーターを設定しま" -"す。これにより、動作中の HAProxy インスタンスがフェイルオーバー用の仮想 IP を" -"バインドできるようになります。" - -msgid "Configuring Block Storage to listen on the VIP address" -msgstr "Block Storage がその仮想 IP アドレスをリッスンする設定" - -msgid "Configuring HAProxy" -msgstr "HAProxy の設定" - -msgid "Configuring InnoDB" -msgstr "InnoDB の設定" - -msgid "Configuring OpenStack services to use this IP address" -msgstr "OpenStack のサービスがこの IP アドレスを使用する設定" - -msgid "" -"Configuring RAID on the hard drives that implement storage protects your " -"data against a hard drive failure. If the node itself fails, data may be " -"lost. In particular, all volumes stored on an LVM node can be lost." 
-msgstr "" -"ストレージを実装するハードディスクに RAID を設定することにより、ハードディス" -"ク障害からデータを保護します。ノード自体が故障した場合、データが失われるかも" -"しれません。とくに、LVM ノードに保存されている全ボリュームは失われる可能性が" -"あります。" - -msgid "Configuring high availability for instances" -msgstr "インスタンスの高可用性の設定" - -msgid "Configuring mysqld" -msgstr "mysqld の設定" - -msgid "Configuring storage" -msgstr "ストレージの設定" - -msgid "Configuring the basic environment" -msgstr "基本環境の設定" - -msgid "Configuring the compute node" -msgstr "コンピュートノードの設定" - -msgid "Configuring the controller" -msgstr "コントローラーの設定" - -msgid "Configuring the networking services" -msgstr "ネットワークサービスの設定" - -msgid "Configuring the server" -msgstr "サーバーの設定" - -msgid "Configuring the shared services" -msgstr "共有サービスの設定" - -msgid "Configuring wsrep replication" -msgstr "wsrep レプリケーションの設定" - -msgid "" -"Connect an additional quorum device to allow small clusters remain quorate " -"during node outages" -msgstr "" -"追加のクォーラムデバイスを接続して、小規模なクラスターがノード障害時にクォー" -"ラムを取得できるようにします。" - -msgid "" -"Consider that, while exchanges and bindings survive the loss of individual " -"nodes, queues and their messages do not because a queue and its contents are " -"located on one node. If we lose this node, we also lose the queue." -msgstr "" -"エクスチェンジとバインドは個々のノード障害に耐えられますが、キューとそのメッ" -"セージは、あるノードに置かれるため、失われることを考慮してください。このノー" -"ドを失うとき、キューも失われます。" - -msgid "Contents" -msgstr "内容" - -msgid "" -"Corosync can be configured to work with either multicast or unicast IP " -"addresses or to use the votequorum library." -msgstr "" -"Corosync を動作させるための設定としては、マルチキャスト IP アドレスを使う、ユ" -"ニキャスト IP アドレスを使う、 votequorum ライブラリーを使う、の選択肢があり" -"ます。" - -msgid "" -"Corosync is started as a regular system service. Depending on your " -"distribution, it may ship with an LSB init script, an upstart job, or a " -"Systemd unit file." -msgstr "" -"Corosync は通常のシステムサービスとして起動します。お使いのディストリビュー" -"ションに応じて、LSB init スクリプト、upstart ジョブ、systemd ユニットファイル" -"を同梱しているかもしれません。" - -msgid "" -"Create a configuration file for ``clustercheck`` at ``/etc/sysconfig/" -"clustercheck``:" -msgstr "" -"``clustercheck`` の設定ファイルを ``/etc/sysconfig/clustercheck`` に作成しま" -"す。" - -msgid "" -"Create a configuration file for the HAProxy monitor service, at ``/etc/" -"xinetd.d/galera-monitor``:" -msgstr "" -"HAProxy モニターサービスの設定ファイルを ``/etc/xinetd.d/galera-monitor`` に" -"作成します。" - -msgid "" -"Create a symbolic link for the database server in the ``disable`` directory:" -msgstr "" -"``disable`` ディレクトリーにデータベースサーバーへのシンボリックリンクを作成" -"します。" - -msgid "" -"Create and name the cluster. Then, start it and enable all components to " -"auto-start at boot time:" -msgstr "" -"クラスターを作成して、名前を付けます。そして、それを起動して、すべてのコン" -"ポーネントが起動時に自動起動するようにします。" - -msgid "Create the Block Storage API endpoint with this IP." -msgstr "この IP を用いて Block Storage API エンドポイントを作成します。" - -msgid "Create the OpenStack Identity Endpoint with this IP address." -msgstr "" -"この IP アドレスを用いて OpenStack Identity エンドポイントを作成します。" - -msgid "Current upstream work" -msgstr "アップストリームの現在の取り組み" - -msgid "" -"Data integrity through fencing (a non-responsive process does not imply it " -"is not doing anything)" -msgstr "" -"フェンシングによるデータ完全性 (応答なしプロセスが何もしていないことを意味し" -"ます)" - -msgid "Data loss: Accidental deletion or destruction of data." -msgstr "データ損失: 意図しないデータの削除や破損。" - -msgid "Database (Galera Cluster) for high availability" -msgstr "データベース (Galera クラスター) の高可用性" - -msgid "Database configuration" -msgstr "データベース設定" - -msgid "Database hosts with Galera Cluster installed" -msgstr "Galera Cluster をインストールしたデータベースホスト" - -msgid "" -"Define the InnoDB memory buffer pool size. 
The default value is 128 MB, but " -"to compensate for Galera Cluster's additional memory usage, scale your usual " -"value back by 5%:" -msgstr "" -"InnoDB メモリーバッファープールサイズを定義します。デフォルト値は 128 MB です" -"が、Galera Cluster の追加メモリー使用状況に対して補うために、通常の値を 5% ま" -"でスケールさせてください。" - -msgid "Deployment flavors" -msgstr "デプロイフレーバー" - -msgid "Deployment strategies" -msgstr "デプロイ戦略" - -msgid "Description" -msgstr "説明" - -msgid "" -"Do not change this value. Other modes may cause ``INSERT`` statements on " -"tables with auto-increment columns to fail as well as unresolved deadlocks " -"that leave the system unresponsive." -msgstr "" -"この値を変更してはいけません。他のモジュールが、自動インクリメントの列を用い" -"てテーブルに ``INSERT`` ステートメントを発行するかもしれません。これは、シス" -"テムが応答不可になる解決不能なデッドロックに陥ります。" - -msgid "Download the resource agent to your system:" -msgstr "まず、お使いのシステムにリソースエージェントをダウンロードします。" - -msgid "" -"Each configured interface must have a unique ``ringnumber``, starting with 0." -msgstr "" -"設定済みの各インターフェースは、0 から始まる一意な ``ringnumber`` を持つ必要" -"があります。" - -msgid "Each instance has its own IP address:" -msgstr "各インスタンスは、自身の IP アドレスを持ちます。" - -msgid "" -"Each instance of HAProxy configures its front end to accept connections only " -"to the virtual IP (VIP) address. The HAProxy back end (termination point) is " -"a list of all the IP addresses of instances for load balancing." -msgstr "" -"HAProxy の各インスタンスは、仮想 IP アドレスへの接続のみを受け付けるよう、そ" -"のフロントエンドを設定します。HAProxy のバックエンド (接続先) は、負荷分散さ" -"れるインスタンスの IP アドレスの一覧です。" - -msgid "" -"Each service also has a backup but manages both the main and redundant " -"systems concurrently. This way, if there is a failure, the user is unlikely " -"to notice. The backup system is already online and takes on increased load " -"while the main system is fixed and brought back online." -msgstr "" -"各サービスはバックアップも持ちますが、メインと冗長システムを同時に管理しま" -"す。このように、ユーザーが気が付かない障害が発生した場合、バックアップシステ" -"ムはすでにオンラインであり、メインシステムが復旧され、オンラインになるまでの" -"間は負荷が高くなります。" - -msgid "" -"Edit the :file:`/etc/glance/glance-api.conf` file to configure the OpenStack " -"Image service:" -msgstr "" -":file:`/etc/glance/glance-api.conf` ファイルを編集して、OpenStack Image サー" -"ビスを設定します。" - -msgid "Edit the :file:`/etc/manila/manila.conf` file:" -msgstr "`/etc/manila/manila.conf` ファイルを編集します。" - -msgid "" -"Edit the :file:`keystone.conf` file to change the values of the :manpage:" -"`bind(2)` parameters:" -msgstr "" -":file:`keystone.conf` ファイルを編集して、 :manpage:`bind(2)` パラメーターの" -"値を変更します。" - -msgid "" -"Edit the ``/etc/cinder/cinder.conf`` file. For example, on a RHEL-based " -"system:" -msgstr "" -"``/etc/cinder/cinder.conf`` ファイルを編集します。たとえば、RHEL 系システムの" -"場合:" - -msgid "Enhanced failure detection" -msgstr "高度な障害検出" - -msgid "" -"Ensure that the InnoDB locking mode for generating auto-increment values is " -"set to ``2``, which is the interleaved locking mode:" -msgstr "" -"自動インクリメント値を生成するための InnoDB ロックモードがをきちんと``2`` に" -"設定してください。これは、インターリーブ・ロックモードです。" - -msgid "" -"Ensure that the InnoDB log buffer is written to file once per second, rather " -"than on each commit, to improve performance:" -msgstr "" -"パフォーマンスを改善するために、InnoDB ログバッファーが、コミットごとではな" -"く、1 秒ごとにファイルに書き込むことを確認します。" - -msgid "" -"Ensure that the binary log format is set to use row-level replication, as " -"opposed to statement-level replication:" -msgstr "" -"バイナリーログ形式が、ステートメントレベルのレプリケーションではなく、行レベ" -"ルのレプリケーションに設定されていることを確認してください。" - -msgid "" -"Ensure that the database server is not bound only to the localhost: " -"``127.0.0.1``. Also, do not bind it to ``0.0.0.0``. 
Binding to the localhost " -"or ``0.0.0.0`` makes ``mySQL`` bind to all IP addresses on the machine, " -"including the virtual IP address causing ``HAProxy`` not to start. Instead, " -"bind to the management IP address of the controller node to enable access by " -"other nodes through the management network:" -msgstr "" -"データベースサーバーが localhost: ``127.0.0.1`` のみにバインドされていないこ" -"とを確認してください。また、``0.0.0.0`` にバインドしないでください。" -"localhost や ``0.0.0.0`` にバインドすることにより、MySQL がマシンのすべての " -"IP アドレスにバインドされます。これは仮想 IP アドレスを含み、``HAProxy`` が起" -"動しなくなります。代わりに、コントローラーノードの管理 IP アドレスにバインド" -"して、管理ネットワーク経由で他のノードによりアクセスできるようにします。" - -msgid "Ensure that the default storage engine is set to InnoDB:" -msgstr "デフォルトのストレージエンジンをきちんと InnoDB に設定してください。" - -msgid "" -"Ensure your HAProxy installation is not a single point of failure, it is " -"advisable to have multiple HAProxy instances running." -msgstr "" -"HAProxy が単一障害点にならないようにします。複数の HAProxy インスタンスを実行" -"することが推奨されます。" - -msgid "" -"Ephemeral storage is allocated for an instance and is deleted when the " -"instance is deleted. The Compute service manages ephemeral storage and by " -"default, Compute stores ephemeral drives as files on local disks on the " -"compute node. As an alternative, you can use Ceph RBD as the storage back " -"end for ephemeral storage." -msgstr "" -"一時ストレージは、インスタンスのために割り当てられ、インスタンスの削除時に削" -"除されます。Compute サービスが一時ストレージを管理します。Compute はデフォル" -"トで、コンピュートノードのローカルディスクにファイルとして一時ディスクを保存" -"します。代わりに、一時ストレージのストレージバックエンドとして Ceph RBD を使" -"用できます。" - -msgid "" -"Even a distributed or replicated application that is able to survive " -"failures on one or more machines can benefit from a cluster manager because " -"a cluster manager has the following capabilities:" -msgstr "" -"いくつかのマシンの障害に耐えられる分散アプリケーションやレプリケーションで" -"も、クラスターマネージャーが以下の機能を持つので、クラスターマネージャーによ" -"る恩恵があります。" - -msgid "Existing solutions" -msgstr "既存のソリューション" - -msgid "Facility services such as power, air conditioning, and fire protection" -msgstr "電源、空調、防火などに関する設備" - -msgid "Firewall" -msgstr "ファイアウォール" - -msgid "" -"For Liberty, you can not have the standalone network nodes. The Networking " -"services are run on the controller nodes. In this guide, the term `network " -"nodes` is used for convenience." -msgstr "" -"Liberty の場合、独立したネットワークノードを一般的に持ちません。Networking " -"サービスはコントローラーノードにおいて実行されます。このガイドでは、便宜上" -"「ネットワークノード」という言葉を使用します。" - -msgid "" -"For OpenStack Compute, (if your OpenStack Identity service IP address is " -"10.0.0.11) use the following configuration in the :file:`api-paste.ini` file:" -msgstr "" -"OpenStack Compute の場合 (OpenStack Identity サービスの IP アドレスが " -"10.0.0.11 の場合)、以下の設定を :file:`api-paste.ini` ファイルに使用します。" - -msgid "For RHEL, Fedora, or CentOS:" -msgstr "RHEL、Fedora、CentOS の場合:" - -msgid "" -"For Red Hat Enterprise Linux and Red Hat-based Linux distributions, the " -"following process uses Systemd unit files." -msgstr "" -"Red Hat Enterprise Linux および Red Hat 系の Linux ディストリビューションの場" -"合、以下のプロセスが Systemd ユニットファイルを使用します。" - -msgid "" -"For SLES 12, the packages are signed by GPG key 893A90DAD85F9316. You should " -"verify the fingerprint of the imported GPG key before using it." -msgstr "" -"SLES 12 の場合、パッケージは GPG キー 893A90DAD85F9316 により署名されていま" -"す。使用する前に、インポートした GPG キーのフィンガープリントを検証すべきで" -"す。" - -msgid "For SLES 12:" -msgstr "SLES 12 の場合:" - -msgid "" -"For UDPU, every node that should be a member of the membership must be " -"specified." 
-msgstr "" -"UDPUでは、全てのノードがメンバーシップメンバーを指定しなければなりません。" - -msgid "" -"For Ubuntu 16.04.1: Create a configuration file for ``clustercheck`` at ``/" -"etc/default/clustercheck``." -msgstr "" -"Ubuntu 16.04.1 の場合: ``clustercheck`` の設定ファイルを ``/etc/default/" -"clustercheck`` に作成します。" - -msgid "For Ubuntu or Debian:" -msgstr "Ubuntu、Debian の場合:" - -msgid "" -"For Ubuntu, you should also enable the Corosync service in the ``/etc/" -"default/corosync`` configuration file." -msgstr "" -"Ubuntu の場合、 ``/etc/default/corosync`` 設定ファイルにおいて Corosync サー" -"ビスも有効化すべきです。" - -msgid "" -"For `Fedora `_" -msgstr "" -"`Fedora `_ の場合" - -msgid "For ``crmsh``:" -msgstr "``crmsh`` の場合:" - -msgid "For ``pcs``:" -msgstr "``pcs`` の場合:" - -msgid "" -"For a complete list of the available parameters, run the ``SHOW VARIABLES`` " -"command from within the database client:" -msgstr "" -"利用できるパラメーターの一覧は、データベースクライアントから ``SHOW " -"VARIABLES`` コマンドを実行してください。" - -msgid "" -"For backward compatibility and supporting existing deployments, the central " -"agent configuration supports using different configuration files. This is " -"for groups of service instances that are running in parallel. For enabling " -"this configuration, set a value for the ``partitioning_group_prefix`` option " -"in the `polling section `_ in the OpenStack Configuration " -"Reference." -msgstr "" -"既存の環境の後方互換性とサポートのために、中央エージェントの設定は、別の設定" -"ファイルを使用することをサポートします。これは並列で実行しているサービスイン" -"スタンスのグループのためです。この設定を有効化するために、OpenStack " -"Configuration Reference の `polling section `_ にある " -"``partitioning_group_prefix`` オプションの値を設定します。" - -msgid "" -"For demonstrations and studying, you can set up a test environment on " -"virtual machines (VMs). This has the following benefits:" -msgstr "" -"デモや学習の場合、仮想マシンにテスト環境をセットアップできます。これには以下" -"の利点があります。" - -msgid "" -"For detailed instructions about installing HAProxy on your nodes, see the " -"HAProxy `official documentation `_." -msgstr "" -"お使いのノードに HAProxy をインストールする方法の詳細は HAProxy `公式ドキュメ" -"ント `_ を参照してください。" - -msgid "" -"For documentation about these parameters, ``wsrep`` provider option, and " -"status variables available in Galera Cluster, see the Galera cluster " -"`Reference `_." -msgstr "" -"Galera Cluster において利用できる、これらのパラメーター、``wsrep`` プロバイ" -"ダーオプション、状態変数のドキュメントは、Galera クラスターの`リファレンス " -"`_ を参照して" -"ください。" - -msgid "" -"For each sub-group of the central agent pool with the same " -"``partitioning_group_prefix``, a disjoint subset of meters must be polled to " -"avoid samples being missing or duplicated. The list of meters to poll can be " -"set in the :file:`/etc/ceilometer/pipeline.yaml` configuration file. For " -"more information about pipelines see the `Data processing and pipelines " -"`_ " -"section." -msgstr "" -"同じ ``partitioning_group_prefix`` を持つ中央エージェントプールの各サブグルー" -"プに対して、サンプルの損失や重複を避けるために、互いに関わらないメーターのサ" -"ブセットが取得される必要があります。取得されるメーターの一覧は :file:`/etc/" -"ceilometer/pipeline.yaml` 設定ファイルに設定できます。パイプラインの詳細は " -"`Data processing and pipelines `_ のセクションを参照してください。" - -msgid "" -"For environments that do not support multicast, Corosync should be " -"configured for unicast. 
An example fragment of the :file:`corosync.conf` " -"file for unicastis is shown below:" -msgstr "" -"マルチキャストをサポートしていない場合、Corosync はユニキャストで設定すべきで" -"す。ユニキャスト向け :file:`corosync.conf` ファイルの設定例を以下に示します。" - -msgid "" -"For example, if your OpenStack Image API service IP address is 10.0.0.11 (as " -"in the configuration explained here), you would use the following " -"configuration in your :file:`nova.conf` file:" -msgstr "" -"例えば、OpenStack Image API サービスの IP アドレスが (ここで説明されている設" -"定のように) 10.0.0.11 ならば、以下の設定を :file:`nova.conf` ファイルに使用し" -"ます。" - -msgid "" -"For example, in a seven-node cluster, the quorum should be set to " -"``floor(7/2) + 1 == 4``. If quorum is four and four nodes fail " -"simultaneously, the cluster itself would fail, whereas it would continue to " -"function, if no more than three nodes fail. If split to partitions of three " -"and four nodes respectively, the quorum of four nodes would continue to " -"operate the majority partition and stop or fence the minority one (depending " -"on the no-quorum-policy cluster configuration)." -msgstr "" -"たとえば、7 ノードクラスターにおいて、クォーラムは ``floor(7/2) + 1 == 4`` に" -"設定されるべきです。クォーラムが 4 で、4 ノードが同時に停止した場合、クラス" -"ター自身が停止するでしょう。一方、3 ノード以下の停止の場合、動作し続けられる" -"でしょう。それぞれ 3 ノードと 4 ノードに分割された場合、4 ノードのクラスター" -"のクォーラムが多数派のパーティションを動作し続け、(no-quorum-policy クラス" -"ター設定に応じて) 少数派を停止またはフェンスするでしょう。" - -msgid "" -"For example, you may enter ``edit p_ip_glance-api`` from the :command:`crm " -"configure` menu and edit the resource to match your preferred virtual IP " -"address." -msgstr "" -"例えば、お好みの仮想 IP アドレスに一致させるために、:command:`crm configure` " -"メニューから ``edit p_ip_glance-api`` と入力し、リソースを編集できます。" - -msgid "" -"For example, you may enter ``edit p_ip_keystone`` from the :command:`crm " -"configure` menu and edit the resource to match your preferred virtual IP " -"address." -msgstr "" -"例えば、お好みの仮想 IP アドレスに一致させるために、:command:`crm configure` " -"メニューから ``edit p_ip_keystone`` と入力し、リソースを編集できます。" - -msgid "" -"For example, you may enter ``edit p_ip_manila-api`` from the :command:`crm " -"configure` menu and edit the resource to match your preferred virtual IP " -"address." -msgstr "" -"例えば、お好みの仮想 IP アドレスに一致させるために、:command:`crm configure` " -"メニューから ``edit p_ip_manila-api`` と入力し、リソースを編集できます。" - -msgid "" -"For firewall configurations, Corosync communicates over UDP only, and uses " -"``mcastport`` (for receives) and ``mcastport - 1`` (for sends)." -msgstr "" -"ファイアウォール設定に向け、Corosync は UDP のみで通信して、 ``mcastport`` " -"(受信用) と ``mcastport - 1`` (送信用) を使用します。" - -msgid "" -"For information about the required configuration options to set in the :file:" -"`ceilometer.conf`, see the `coordination section `_ in the OpenStack Configuration " -"Reference." -msgstr "" -":file:`ceilometer.conf` 設定ファイルに設定する必要があるオプションの詳細は、" -"OpenStack Configuration Reference の `coordination section `_ を参照してください。" - -msgid "" -"For more information about configuring storage back ends for the different " -"storage options, see `Manage volumes `_ in the OpenStack Administrator Guide." -msgstr "" -"さまざまなストレージの選択肢に対して、ストレージバックエンドを設定する方法の" -"詳細は、OpenStack Administrator Guide の `Manage volumes `_ を参照してくだ" -"さい。" - -msgid "" -"For more information on configuring SELinux to work with Galera Cluster, see " -"the `SELinux Documentation `_" -msgstr "" -"Galera Cluster と動作する SELinux を設定する方法の詳細は `SELinux ドキュメン" -"ト `_ を参照し" -"てください。" - -msgid "" -"For more information on firewalls, see `firewalls and default ports `_ in OpenStack " -"Administrator Guide." 
-msgstr "" -"ファイアウォールの詳細は、OpenStack Administrator Guide の `Firewalls and " -"default ports `_ を参照してください。" - -msgid "" -"For more information, see the official installation manual for the " -"distribution:" -msgstr "" -"詳細はディストリビューションの公式インストールガイドを参照してください。" - -msgid "For openSUSE:" -msgstr "openSUSE の場合:" - -msgid "For servers that use ``systemd``, run the following command:" -msgstr "``systemd`` を使用するサーバーの場合、以下のコマンドを実行します。" - -msgid "For servers that use ``systemd``, run the following commands:" -msgstr "``systemd`` を使用するサーバーの場合、以下のコマンドを実行します。" - -msgid "" -"For these reasons, we highly recommend the use of a cluster manager like " -"`Pacemaker `_." -msgstr "" -"これらの理由のため、`Pacemaker `_ のようなクラスター" -"マネージャーを使用することを強く推奨します。" - -msgid "" -"For this reason, each cluster in a high availability environment should have " -"an odd number of nodes and the quorum is defined as more than a half of the " -"nodes. If multiple nodes fail so that the cluster size falls below the " -"quorum value, the cluster itself fails." -msgstr "" -"この理由により、高可用性環境における各クラスターは、奇数個のコードを持つべき" -"であり、クォーラムがノードの過半数に定義されます。クラスター数がクォーラム値" -"を下回るよう、複数のノードが停止した場合、クラスター自身が停止します。" - -msgid "" -"Galera Cluster configuration parameters all have the ``wsrep_`` prefix. You " -"must define the following parameters for each cluster node in your OpenStack " -"database." -msgstr "" -"Galera Cluster の設定パラメーターは、すべて ``wsrep_`` プレフィックスを持ちま" -"す。OpenStack データベースにおいて、各クラスターノード向けに以下のパラメー" -"ターを定義する必要があります。" - -msgid "" -"Galera Cluster does not support non-transactional storage engines and " -"requires that you use InnoDB by default. There are some additional " -"parameters that you must define to avoid conflicts." -msgstr "" -"Galera Cluster は、トランザクション未対応ストレージエンジンをサポートしませ" -"ん。デフォルトでは InnoDB を使用する必要があります。競合を避けるために定義す" -"る必要のある追加パラメーターがいくつかあります。" - -msgid "" -"Galera Cluster requires that you open the following ports to network traffic:" -msgstr "" -"Galera Cluster は、ネットワーク通信のために以下のポートを開く必要があります。" - -msgid "Galera can be configured using one of the following strategies:" -msgstr "Galera は、以下の方法のどれかにより設定できます。" - -msgid "Galera runs behind HAProxy:" -msgstr "Galera は HAProxy の後ろで動作します。" - -msgid "" -"Galera synchronous replication guarantees a zero slave lag. The failover " -"procedure completes once HAProxy detects that the active back end has gone " -"down and switches to the backup one, which is then marked as ``UP``. If no " -"back ends are ``UP``, the failover procedure finishes only when the Galera " -"Cluster has been successfully reassembled. The SLA is normally no more than " -"5 minutes." -msgstr "" -"Galera の同期レプリケーションは、スレーブのラグがないことを保証します。フェイ" -"ルオーバー手順は、アクティブなバックエンドがダウンしたことを HAProxy が検知す" -"ると、バックアップに切り替え、``UP`` 状態になります。バックエンドが ``UP`` に" -"ならない場合、Galera クラスターが再び正常に再構成された場合のみ、フェイルオー" -"バー手順が完了します。SLA は、通常 5 分以内です。" - -msgid "HAProxy" -msgstr "HAProxy" - -msgid "" -"HAProxy load balances incoming requests and exposes just one IP address for " -"all the clients." -msgstr "" -"HAProxy は、受信リクエストを負荷分散して、すべてのクライアントに 1 つの IP ア" -"ドレスを公開します。" - -msgid "" -"HAProxy provides a fast and reliable HTTP reverse proxy and load balancer " -"for TCP or HTTP applications. It is particularly suited for web crawling " -"under very high loads while needing persistence or Layer 7 processing. It " -"realistically supports tens of thousands of connections with recent hardware." 
-msgstr "" -"HAProxy は、TCP や HTTP ベースのアプリケーションに、高速かつ高信頼な HTTP リ" -"バースプロキシーとロードバランサーを提供します。とくに、永続性や L7 処理を必" -"要とする、非常に高負荷な Web サイトに適しています。最近のハードウェアを用いる" -"と、数千の接続を現実的にサポートします。" - -msgid "Hardware considerations for high availability" -msgstr "高可用性のためのハードウェア考慮事項" - -msgid "Hardware setup" -msgstr "ハードウェアのセットアップ" - -msgid "" -"High availability is implemented with redundant hardware running redundant " -"instances of each service. If one piece of hardware running one instance of " -"a service fails, the system can then failover to use another instance of a " -"service that is running on hardware that did not fail." -msgstr "" -"高可用性は、各サービスの冗長インスタンスを実行する、冗長ハードウェアを用いて" -"実装されます。あるサービスのインスタンスの 1 つを実行しているハードウェアの部" -"品が故障した場合、システムはフェイルオーバーして、故障していないハードウェア" -"で動作している別のサービスインスタンスを使用します。" - -msgid "" -"High availability is not for every user. It presents some challenges. High " -"availability may be too complex for databases or systems with large amounts " -"of data. Replication can slow large systems down. Different setups have " -"different prerequisites. Read the guidelines for each setup." -msgstr "" -"高可用性はあらゆるユーザー向けではありません。いくつかの挑戦を妨害します。高" -"可用性は、大量のデータを持つデータベースやシステムをあまりに複雑にする可能性" -"があります。レプリケーションは大規模システムをスローダウンさせる可能性があり" -"ます。異なるセットアップには、異なる事前要件があります。各セットアップのガイ" -"ドラインを参照してください。" - -msgid "High availability is turned off as the default in OpenStack setups." -msgstr "高可用性は、デフォルトの OpenStack セットアップで無効化されています。" - -msgid "High availability systems seek to minimize the following issues:" -msgstr "高可用性システムは、以下の問題を最小化することを目指しています。" - -msgid "" -"High availability systems typically achieve an uptime percentage of 99.99% " -"or more, which roughly equates to less than an hour of cumulative downtime " -"per year. In order to achieve this, high availability systems should keep " -"recovery times after a failure to about one to two minutes, sometimes " -"significantly less." -msgstr "" -"高可用性システムは、一般的に 99.99% 以上の稼働率を達成します。おそよ年間 1 時" -"間未満の停止時間になります。高可用性システムは、これを実現するために、障害発" -"生後の復旧時間を 1 ~ 2 分以内に、ときにはさらに短く抑えるべきです。" - -msgid "Highly available Block Storage API" -msgstr "高可用性 Block Storage API" - -msgid "Highly available Identity API" -msgstr "高可用性 Identity API" - -msgid "Highly available Image API" -msgstr "高可用性 Image API" - -msgid "Highly available Shared File Systems API" -msgstr "高可用性 Shared File Systems API" - -msgid "Highly available Telemetry" -msgstr "高可用性 Telemetry" - -msgid "How long to back-off for between retries when connecting to RabbitMQ:" -msgstr "RabbitMQ に接続するとき再試行するまでにバックオフする間隔:" - -msgid "" -"However, running an OpenStack environment on VMs degrades the performance of " -"your instances, particularly if your hypervisor or processor lacks support " -"for hardware acceleration of nested VMs." -msgstr "" -"しかしながら、仮想マシン上で OpenStack 環境を実行すると、インスタンスの性能が" -"悪くなります。とくに、ハイパーバイザーとプロセッサーが nested 仮想マシンの" -"ハードウェア支援機能をサポートしない場合は顕著です。" - -msgid "" -"If the Block Storage service runs on the same nodes as the other services, " -"then it is advisable to also include:" -msgstr "" -"Block Storage サービスが他のサービスと同じノードで実行している場合、以下も含" -"めることを推奨します。" - -msgid "" -"If the Identity service will be sending ceilometer notifications and your " -"message bus is configured for high availability, you will need to ensure " -"that the Identity service is correctly configured to use it. For details on " -"how to configure the Identity service for this kind of deployment, see :doc:" -"`shared-messaging`." 
-msgstr "" -"Identity サービスが ceilometer の通知を送信して、メッセージバスが高可用性のた" -"めに設定されている場合、Identity サービスがきちんとそれを使用するよう設定する" -"必要があります。この種の導入向けに Identity サービスを設定する方法の詳細は、:" -"doc:`shared-messaging` を参照してください。" - -msgid "" -"If the ``broadcast`` parameter is set to ``yes``, the broadcast address is " -"used for communication. If this option is set, the ``mcastaddr`` parameter " -"should not be set." -msgstr "" -"``broadcast`` パラメーターが ``yes`` に設定されている場合、ブロードキャストア" -"ドレスが通信に使用されます。このオプションが設定されている場合、" -"``mcastaddr`` パラメーターは設定すべきではありません。" - -msgid "" -"If the cluster is working, you can create usernames and passwords for the " -"queues." -msgstr "" -"クラスターが動作していると、キューのユーザー名とパスワードを作成できます。" - -msgid "" -"If you are using Corosync version 2 on Ubuntu 14.04, remove or comment out " -"lines under the service stanza. These stanzas enable Pacemaker to start up. " -"Another potential problem is the boot and shutdown order of Corosync and " -"Pacemaker. To force Pacemaker to start after Corosync and stop before " -"Corosync, fix the start and kill symlinks manually:" -msgstr "" -"Ubuntu 14.04 において Corosync バージョン 2 を使用している場合、サービスの節" -"の下にある行を削除するかコメントアウトします。これらの節により、Pacemaker が" -"起動できます。別の潜在的な問題は、Corosync と Pacemaker の起動と停止の順番で" -"す。必ず Pacemaker が Corosync の後に起動して、Corosync の前に停止させるため" -"に、start と kill のシンボリックリンクを手動で修正します。" - -msgid "" -"If you are using Corosync version 2, use the :command:`corosync-cmapctl` " -"utility instead of :command:`corosync-objctl`; it is a direct replacement." -msgstr "" -"Corosync バージョン 2 を使用している場合、 :command:`corosync-objctl` の代わ" -"りに :command:`corosync-cmapctl` ユーティリティーを使用します。これは、そのま" -"ま置き換えられます。" - -msgid "" -"If you are using both private and public IP addresses, create two virtual IP " -"addresses and define the endpoint. For example:" -msgstr "" -"プライベート IP とパブリック IP の両方を使用する場合、2 つの仮想 IP アドレス" -"を作成し、次のようにエンドポイントを定義します。" - -msgid "" -"If you are using both private and public IP addresses, create two virtual " -"IPs and define your endpoint. For example:" -msgstr "" -"プライベート IP アドレスとパブリック IP アドレスの両方を使用する場合、2 つの" -"仮想 IP アドレスを作成し、次のようにエンドポイントを定義します。" - -msgid "" -"If you are using both private and public IP addresses, you should create two " -"virtual IPs and define your endpoints like this:" -msgstr "" -"プライベート IP アドレスとパブリック IP アドレスの両方を使用する場合、2 つの" -"仮想 IP アドレスを作成し、次のようにエンドポイントを定義すべきです。" - -msgid "" -"If you are using the Block Storage service OCF agent, some settings will be " -"filled in for you, resulting in a shorter configuration file:" -msgstr "" -"Block Storage サービス OCF エージェントを使用している場合、いくつかの設定は入" -"力されていて、設定ファイルを短くできます。" - -msgid "" -"If you are using the horizon Dashboard, edit the :file:`local_settings.py` " -"file to include the following:" -msgstr "" -"Dashboard を使用している場合、以下の内容を含めた :file:`local_settings.py` " -"ファイルを編集します。" - -msgid "" -"If you change the configuration from an old set-up that did not use HA " -"queues, restart the service:" -msgstr "" -"HA キューを使用していない古いセットアップから設定を変更した場合、サービスを再" -"起動します。" - -msgid "" -"If you use HAProxy as a load-balancing client to provide access to the " -"Galera Cluster, as described in the :doc:`controller-ha-haproxy`, you can " -"use the ``clustercheck`` utility to improve health checks." -msgstr "" -":doc:`controller-ha-haproxy` に記載されているとおり、Galera Cluster へのクラ" -"イアントアクセスを負荷分散するために、HAProxy を使用している場合、 " -"``clustercheck`` ユーティリティーを使用して、より良くヘルスチェックできます。" - -msgid "" -"In Corosync, configurations use redundant networking (with more than one " -"interface). 
This means you must select a Redundant Ring Protocol (RRP) mode " -"other than none. We recommend ``active`` as the RRP mode." -msgstr "" -"Corosync において、設定は (複数のインターフェースを用いた) 冗長ネットワークを" -"使用します。これは ``none`` ではなく、Redundant Ring Protocol (RRP) を選択す" -"る必要があることを意味します。``active`` が RRP の推奨モードです。" - -msgid "" -"In Red Hat Enterprise Linux or CentOS environments, this is a recommended " -"path to perform configuration. For more information, see the `RHEL docs " -"`_." -msgstr "" -"Red Hat Enterprise Linux や CentOS 環境の場合、設定するための推奨パスがありま" -"す。詳細は `RHEL docs `_ を参照してください。" - -msgid "" -"In a collapsed configuration, there is a single cluster of 3 or more nodes " -"on which every component is running." -msgstr "" -"この折りたたまれた設定では、すべてのコンポーネントが動作する、3 つ以上のノー" -"ドを持つシングルクラスターがあります。" - -msgid "" -"In addition to Galera Cluster, you can also achieve high availability " -"through other database options, such as PostgreSQL, which has its own " -"replication system." -msgstr "" -"Galera Cluster 以外に、独自のレプリケーションシステムを持つ PostgreSQL など、" -"他のデータベースにより高可用性を実現することもできます。" - -msgid "" -"In general, we can divide all the OpenStack components into three categories:" -msgstr "" -"一般的に、すべての OpenStack コンポーネントは 3 つのカテゴリーに分割できま" -"す。" - -msgid "" -"In the Galera Cluster, the Primary Component is the cluster of database " -"servers that replicate into each other. In the event that a cluster node " -"loses connectivity with the Primary Component, it defaults into a non-" -"operational state, to avoid creating or serving inconsistent data." -msgstr "" -"Galera Cluster では、Primary Component が、お互いにレプリケーションするデータ" -"ベースサーバーのクラスターです。クラスターノードが Primary Component との接続" -"性を失った場合、不整合なデータの作成や処理を避けるために、デフォルトで非稼働" -"状態になります。" - -msgid "" -"In the event that a component fails and a back-up system must take on its " -"load, most high availability systems will replace the failed component as " -"quickly as possible to maintain necessary redundancy. This way time spent in " -"a degraded protection state is minimized." -msgstr "" -"コンポーネントが故障して、バックアップシステムがその負荷を引き継ぐ場合、多く" -"の高可用性システムは、十分な冗長性を維持するために、できる限り早く故障したコ" -"ンポーネントを置き換えます。この方法は、デグレードされた保護状態を最小化する" -"ことに時間を使います。" - -msgid "" -"In the event that you need to restart any cluster node, you can do so. When " -"the database server comes back it, it establishes connectivity with the " -"Primary Component and updates itself to any changes it may have missed while " -"down." -msgstr "" -"クラスターノードをどれか再起動する必要がある場合、実行できます。データベース" -"サーバーが戻ってきたとき、Primary Component との接続を確立して、停止中に失っ" -"た変更をすべて自身に適用します。" - -msgid "" -"In theory, you can run the Block Storage service as active/active. However, " -"because of sufficient concerns, we recommend running the volume component as " -"active/passive only." -msgstr "" -"理論的には、Block Storage サービスをアクティブ/アクティブとして実行できます。" -"しかしながら、いくつかの課題のため、ボリュームコンポーネントをアクティブ/パッ" -"シブのみとして実行することが推奨されます。" - -msgid "" -"In this configuration, each service runs in a dedicated cluster of 3 or more " -"nodes." -msgstr "" -"この設定では、各サービスが 3 以上のノードの専用クラスターで動作します。" - -msgid "" -"Individual cluster nodes can stop and be restarted without issue. When a " -"database loses its connection or restarts, the Galera Cluster brings it back " -"into sync once it reestablishes connection with the Primary Component. In " -"the event that you need to restart the entire cluster, identify the most " -"advanced cluster node and initialize the Primary Component on that node." 
-msgstr "" -"各クラスターノードは、問題なく停止したり再起動したりできます。データベースが" -"接続を失ったり、再起動したりしたとき、Primary Component と再接続されると、" -"Galera Cluster は同期状態に戻ります。クラスター全体を再起動する必要があると" -"き、最も高度なクラスターノードを識別し、そのノードの Primary Component を初期" -"化します。" - -msgid "" -"Initialize the Primary Component on one cluster node. For servers that use " -"``init``, run the following command:" -msgstr "" -"1 つのクラスターノードにおいて Primary Component を初期化します。``init`` を" -"使用するサーバーの場合、以下のコマンドを実行します。" - -msgid "Initializing the cluster" -msgstr "クラスターの初期化" - -msgid "Install RabbitMQ" -msgstr "RabbitMQ のインストール" - -msgid "Install packages" -msgstr "パッケージのインストール" - -msgid "Installing Memcached" -msgstr "Memcached のインストール" - -msgid "Installing the operating system" -msgstr "オペレーティングシステムのインストール" - -msgid "Introduction to OpenStack high availability" -msgstr "OpenStack 高可用性の概要" - -msgid "" -"It is also possible to follow a segregated approach for one or more " -"components that are expected to be a bottleneck and use a collapsed approach " -"for the remainder." -msgstr "" -"1 つ以上のコンポーネントに対して、別々のアプローチをとることができますが、ボ" -"トルネックになり、思い出すことが難しいアプローチを使用する可能性があります。" - -msgid "" -"It is possible to add controllers to such an environment to convert it into " -"a truly highly available environment." -msgstr "" -"コントローラーをそのような環境に追加して、それを信頼できる高可用性環境に変え" -"られます。" - -msgid "" -"It is possible to deploy three different flavors of the Pacemaker " -"architecture. The two extremes are ``Collapsed`` (where every component runs " -"on every node) and ``Segregated`` (where every component runs in its own 3+ " -"node cluster)." -msgstr "" -"3 種類の Pacemaker アーキテクチャーを導入できます。2 つの極端なものは、" -"``Collapsed`` (すべてのコンポーネントがすべてのノードで動作する) と " -"``Segregated`` (すべてのコンポーネントが自身の 3+ ノードクラスターで動作す" -"る) です。" - -msgid "" -"It is storage and application-agnostic, and in no way specific to OpenStack." -msgstr "" -"ストレージとアプリケーションから独立していて、OpenStack 特有の方法はありませ" -"ん。" - -msgid "" -"It is very important that all members of the system share the same view of " -"who their peers are and whether or not they are in the majority. Failure to " -"do this leads very quickly to an internal `split-brain `_ state. This is where different parts of " -"the system are pulling in different and incompatible directions." -msgstr "" -"システムのすべてのメンバーが、誰がメンバーであるか、それらが多数派かどうかに" -"ついて、同じビューを共有することが非常に重要です。これを行う障害は、かなりす" -"ぐに `スプリットブレイン `_ 状態を引き起こします。これは、システムの別々の部分が、" -"別々な互換性のない方向を引き込むことです。" - -msgid "List the nodes known to the quorum service" -msgstr "クォーラムサービスが把握しているノードの一覧表示" - -msgid "Load distribution" -msgstr "負荷分散" - -msgid "" -"Log in to the database client and grant the ``clustercheck`` user " -"``PROCESS`` privileges:" -msgstr "" -"データベースクライアントにログインして、``clustercheck`` ユーザーに " -"``PROCESS`` 権限を与えます。" - -msgid "" -"Maintains a redundant instance that can be brought online when the active " -"service fails. For example, OpenStack writes to the main database while " -"maintaining a disaster recovery database that can be brought online if the " -"main database fails." -msgstr "" -"動作中のサービスが停止したとき、オンラインにできる冗長インスタンスを維持しま" -"す。例えば、メインのデータベースが故障したとき、オンラインになる災害対策デー" -"タベースを維持する限り、OpenStack はメインのデータベースに書き込みます。" - -msgid "Make sure `pcs` is running and configured to start at boot time:" -msgstr "" -"`pcs` が実行中で、ブート時に起動するよう設定されていることを確認してくださ" -"い。" - -msgid "" -"Make sure to save the changes once you are done. 
This will vary depending on " -"your distribution:" -msgstr "" -"完了後、きちんと変更を保存してください。これは、お使いのディストリビューショ" -"ンにより異なります。" - -msgid "" -"Making the Block Storage (cinder) API service highly available in active/" -"active mode involves:" -msgstr "" -"Block Storage (cinder) API サービスのアクティブ/アクティブモードでの高可用性" -"は、以下が関係します。" - -msgid "" -"Making the Block Storage API service highly available in active/passive mode " -"involves:" -msgstr "" -"Block Storage API サービスのアクティブ/パッシブモードでの高可用性は、以下が関" -"係します。" - -msgid "" -"Making the OpenStack Identity service highly available in active and passive " -"mode involves:" -msgstr "" -"OpenStack Identity Service をアクティブ・パッシブモードで高可用性にすること" -"は、次のことが関連します。" - -msgid "" -"Making the RabbitMQ service highly available involves the following steps:" -msgstr "RabbitMQ サービスを高可用性にすることは、以下の手順が関連します。" - -msgid "" -"Making the Shared File Systems (manila) API service highly available in " -"active/passive mode involves:" -msgstr "" -"Shared File Systems (manila) API サービスのアクティブ/パッシブモードでの高可" -"用性は、以下が関係します。" - -msgid "Management" -msgstr "マネジメント" - -msgid "" -"Managing the Block Storage API daemon with the Pacemaker cluster manager" -msgstr "" -"Pacemaker クラスターマネージャーを用いた Block Storge API デーモンの管理" - -msgid "" -"Many services can act in an active/active capacity, however, they usually " -"require an external mechanism for distributing requests to one of the " -"available instances. The proxy server can serve this role." -msgstr "" -"ほとんどのサービスがアクティブ/アクティブ機能で動作できます。しかしながら、通" -"常は分散されたリクエストが利用できるインスタンスのどれかになる外部機能が必要" -"になります。プロキシーサーバーはこの役割になれます。" - -msgid "Maximum number of network nodes to use for the HA router." -msgstr "HA ルーターのために使用するネットワークノードの最大数" - -msgid "" -"Maximum retries with trying to connect to RabbitMQ (infinite by default):" -msgstr "RabbitMQ に接続を試行する最大回数 (デフォルトで無制限):" - -msgid "Memcached" -msgstr "Memcached" - -msgid "" -"Memcached is a general-purpose distributed memory caching system. It is used " -"to speed up dynamic database-driven websites by caching data and objects in " -"RAM to reduce the number of times an external data source must be read." -msgstr "" -"Memcached は汎用の分散メモリーキャッシュシステムです。データやオブジェクトを" -"メモリーにキャッシュすることにより、外部データソースの読み込み回数を減らし、" -"データベースを利用した動的 Web サイトを高速化するために使用されます。" - -msgid "" -"Memcached is a memory cache demon that can be used by most OpenStack " -"services to store ephemeral data, such as tokens." -msgstr "" -"Memcached は、ほとんどの OpenStack サービスがトークンなどの一時的なデータを保" -"存するために使用できる、メモリーキャッシュのデーモンです。" - -msgid "" -"Memcached uses a timeout value, which should always be set to a value that " -"is higher than the heartbeat value set for Telemetry." -msgstr "" -"Memcached は、タイムアウト値を使用します。これは、Telemetry 向けに設定された" -"ハートビート値よりも大きい値を常に設定されるべきです。" - -msgid "Memory" -msgstr "メモリー" - -msgid "" -"Memory caching is managed by `oslo.cache `_. This " -"ensures consistency across all projects when using multiple Memcached " -"servers. The following is an example configuration with three hosts:" -msgstr "" -"メモリーキャッシュは `oslo.cache `_ により管理されます。これに" -"より、複数の Memcached サーバーの使用時に全プロジェクト間で一貫性を保証できま" -"す。以下の例は 3 ノードの設定例です。" - -msgid "Messaging service for high availability" -msgstr "メッセージサービスの高可用性" - -msgid "" -"Minimum number of network nodes to use for the HA router. A new router can " -"be created only if this number of network nodes are available." -msgstr "" -"HA ルーターのために使用するネットワークノードの最小数。この数だけのネットワー" -"クノードを利用できる場合のみ、新規ルーターを作成できます。" - -msgid "" -"Mirrored queues in RabbitMQ improve the availability of service since it is " -"resilient to failures." 
-msgstr "" -"RabbitMQ のキューミラーは、障害耐性があるので、サービスの可用性を改善します。" - -msgid "Mixed" -msgstr "Mixed" - -msgid "MongoDB" -msgstr "MongoDB" - -msgid "More information is available in the RabbitMQ documentation:" -msgstr "詳細は RabbitMQ のドキュメントにあります。" - -msgid "" -"Most OpenStack services can use Memcached to store ephemeral data such as " -"tokens. Although Memcached does not support typical forms of redundancy such " -"as clustering, OpenStack services can use almost any number of instances by " -"configuring multiple hostnames or IP addresses." -msgstr "" -"ほとんどの OpenStack サービスは、トークンなどの一時データを保存するために " -"Memcached を使用できます。Memcached はクラスターなどの一般的な形式の冗長化を" -"サポートしませんが、OpenStack サービスは複数のホスト名や IP アドレスを設定す" -"ることにより、ほぼ任意の数のインスタンスを使用できます。" - -msgid "" -"Most distributions ship an example configuration file (:file:`corosync.conf." -"example`) as part of the documentation bundled with the Corosync package. An " -"example Corosync configuration file is shown below:" -msgstr "" -"ほとんどのディストリビューションは、Corosync パッケージに同梱されているドキュ" -"メントの一部として、サンプル設定ファイル (:file:`corosync.conf.example`) を同" -"梱しています。" - -msgid "" -"Most high availability systems fail in the event of multiple independent " -"(non-consequential) failures. In this case, most implementations favor " -"protecting data over maintaining availability." -msgstr "" -"多くの高可用性システムは、複数の独立した (不連続な) 障害が発生すると停止しま" -"す。この場合、多くのシステムは可用性の維持よりデータを保護することを優先しま" -"す。" - -msgid "" -"Most high availability systems guarantee protection against system downtime " -"and data loss only in the event of a single failure. However, they are also " -"expected to protect against cascading failures, where a single failure " -"deteriorates into a series of consequential failures. Many service providers " -"guarantee a :term:`Service Level Agreement (SLA)` including uptime " -"percentage of computing service, which is calculated based on the available " -"time and system downtime excluding planned outage time." -msgstr "" -"多くの高可用性システムは、単一障害事象のみにおいて、システム停止時間やデータ" -"損失に対する保護を保証します。しかしながら、単一障害が一連の障害を悪化させて" -"いく、段階的な障害に対しても保護されることが期待されます。多くのサービスプロ" -"バイダーは、コンピューティングサービスの稼働率などの :term:`Service Level " -"Agreement (SLA)` を保証します。それは、計画停止を除くシステム停止時間と稼働時" -"間に基づいて計算されます。" - -msgid "" -"Multicast groups (``mcastaddr``) must not be reused across cluster " -"boundaries. No two distinct clusters should ever use the same multicast " -"group. Be sure to select multicast addresses compliant with `RFC 2365, " -"\"Administratively Scoped IP Multicast\" `_." -msgstr "" -"マルチキャストグループ (``mcastaddr``) は、クラスターの境界を越えて再利用でき" -"ません。2 つの独立したクラスターは、同じマルチキャストグループを使用すべきで" -"はありません。選択したマルチキャストアドレス をきちんと`RFC 2365, " -"\"Administratively Scoped IP Multicast\" `_ に準拠させてください。" - -msgid "" -"MySQL databases, including MariaDB and Percona XtraDB, manage their " -"configurations using a ``my.cnf`` file, which is typically located in the ``/" -"etc`` directory. Configuration options available in these databases are also " -"available in Galera Cluster, with some restrictions and several additions." 
-msgstr "" -"MariaDB や Percona XtraDB を含む、MySQL は ``my.cnf`` ファイルを使用して設定" -"を管理します。一般的に ``/etc`` ディレクトリーにあります。これらのデータベー" -"スにおいて利用できる設定オプションは、Galera Cluster においても利用できます。" -"いくつかの制約や追加があります。" - -msgid "NIC" -msgstr "NIC" - -msgid "Network components, such as switches and routers" -msgstr "スイッチやルーターなどのネットワークの構成要素" - -msgid "Networking L2 agent" -msgstr "Neutron L2 エージェント" - -msgid "No firewalls between the hosts" -msgstr "ホスト間のファイアウォールなし" - -msgid "Node type" -msgstr "ノード種別" - -msgid "Note the following about the recommended interface configuration:" -msgstr "インターフェースの推奨設定に関する注意事項がいくつかあります。" - -msgid "Note the following:" -msgstr "以下に注意してください。" - -msgid "" -"Older versions of some distributions, which do not have an up-to-date policy " -"for securing Galera, may also require SELinux to be more relaxed about " -"database access and actions:" -msgstr "" -"いくつかのディストリビューションの古いバージョンは、Galera をセキュア化するた" -"めの最新ポリシーを提供していません。データベースへのアクセスと操作のために " -"SELinux をもう少しゆるく設定する必要があるかもしれません。" - -msgid "On CentOS, RHEL, openSUSE, and SLES:" -msgstr "CentOS、RHEL、openSUSE、SLES の場合:" - -msgid "" -"On RHEL-based systems, create resources for cinder's systemd agents and " -"create constraints to enforce startup/shutdown ordering:" -msgstr "" -"RHEL 系のシステムでは、cinder の systemd エージェント向けリソースを作成して、" -"起動と停止の順番を強制する制約を作成します。" - -msgid "" -"On ``3306``, Galera Cluster uses TCP for database client connections and " -"State Snapshot Transfers methods that require the client, (that is, " -"``mysqldump``)." -msgstr "" -"``3306`` では、Galera Cluster がデータベースクライアント接続のために TCP を使" -"用します。また、クライアント 、つまり ``mysqldump`` を必要とする State " -"Snapshot Transfers メソッドを使用します。" - -msgid "" -"On ``4444``, Galera Cluster uses TCP for all other State Snapshot Transfer " -"methods." -msgstr "" -"``4444`` では、Galera Cluster が他のすべての State Snapshot Transfer メソッド" -"のために TCP を使用します。" - -msgid "" -"On ``4567``, Galera Cluster uses TCP for replication traffic. Multicast " -"replication uses both TCP and UDP on this port." -msgstr "" -"``4567`` では、Galera Cluster が複製通信のために TCP を使用します。マルチキャ" -"ストレプリケーションは、このポートで TCP と UDP を使用します。" - -msgid "On ``4568``, Galera Cluster uses TCP for Incremental State Transfers." -msgstr "" -"``4568`` では、Galera Cluster が Incremental State Transfers のために TCP を" -"使用します。" - -msgid "" -"On any host that is meant to be part of a Pacemaker cluster, establish " -"cluster communications through the Corosync messaging layer. This involves " -"installing the following packages (and their dependencies, which your " -"package manager usually installs automatically):" -msgstr "" -"Pacemaker クラスターに参加させる各ホストで、まず Corosync メッセージレイヤー" -"でクラスター通信を確立します。これには、以下のパッケージをインストールする必" -"要があります (依存パッケージも含みます。依存パッケージは通常パッケージマネー" -"ジャーにより自動的にインストールされます)。" - -msgid "" -"On each target node, verify the correct owner, group, and permissions of the " -"file :file:`erlang.cookie`:" -msgstr "" -"各ターゲットノードにおいて、 :file:`erlang.cookie` の所有者、所有グループ、" -"パーミッションが正しいことを確認します。" - -msgid "" -"On the infrastructure layer, the SLA is the time for which RabbitMQ cluster " -"reassembles. Several cases are possible. The Mnesia keeper node is the " -"master of the corresponding Pacemaker resource for RabbitMQ. When it fails, " -"the result is a full AMQP cluster downtime interval. Normally, its SLA is no " -"more than several minutes. Failure of another node that is a slave of the " -"corresponding Pacemaker resource for RabbitMQ results in no AMQP cluster " -"downtime at all." 
-msgstr "" -"インフラ層では、SLA は RabbitMQ クラスターが再構成されるまでの時間です。いく" -"つかの場合では実現できます。Mnesia keeper ノードは、対応する RabbitMQ 用 " -"Pacemaker リソースのマスターです。停止したとき、結果として AMQP クラスターの" -"停止時間になります。通常、その SLA は、数分間より長くなることはありません。対" -"応する RabbitMQ 用 Pacemaker リソースのスレーブになっている、他のノードの停止" -"により AMQP クラスターが停止することはありません。" - -msgid "" -"Once completed, commit your configuration changes by entering :command:" -"`commit` from the :command:`crm configure` menu. Pacemaker then starts the " -"Block Storage API service and its dependent resources on one of your nodes." -msgstr "" -"これらの手順の完了後、:command:`crm configure` メニューから :command:" -"`commit` と入力し、設定の変更をコミットします。Pacemaker は Block Storage " -"API サービスおよび依存するリソースを同じノードに起動します。" - -msgid "" -"Once created, synchronize the :file:`corosync.conf` file (and the :file:" -"`authkey` file if the secauth option is enabled) across all cluster nodes." -msgstr "" -"作成され同期された後、 :file:`corosync.conf` ファイル (および、secauth オプ" -"ションが有効化されている場合、 :file:`authkey` ファイル) が、すべてのクラス" -"ターノードにわたり同期されます。" - -msgid "" -"Once the database server starts, check the cluster status using the " -"``wsrep_cluster_size`` status variable. From the database client, run the " -"following command:" -msgstr "" -"データベースサーバーが起動すると、``wsrep_cluster_size`` 状態変数を使用して、" -"クラスター状態を確認します。データベースクライアントから、以下のコマンドを実" -"行します。" - -msgid "" -"One physical server can support multiple nodes, each of which supports " -"almost any number of network interfaces." -msgstr "" -"1 台の物理サーバーで複数のノードを構築できます。各ノードは複数のネットワーク" -"インターフェースを持てます。" - -msgid "" -"Only one instance for the central and compute agent service(s) is able to " -"run and function correctly if the ``backend_url`` option is not set." -msgstr "" -"``backend_url`` オプションが設定されていない場合、中央エージェントとコン" -"ピュートエージェントのサービスのインスタンスが 、1 つだけ動作できて正しく機能" -"します。" - -msgid "" -"OpenStack APIs: APIs that are HTTP(s) stateless services written in python, " -"easy to duplicate and mostly easy to load balance." -msgstr "" -"OpenStack API: これらは HTTP のステートレスサービスです。Python で書かれてい" -"て、簡単に冗長化でき、かなり簡単に負荷分散できます。" - -msgid "OpenStack Block Storage" -msgstr "OpenStack Block Storage" - -msgid "OpenStack Compute" -msgstr "OpenStack Compute" - -msgid "OpenStack High Availability Guide" -msgstr "OpenStack 高可用性ガイド" - -msgid "OpenStack Networking" -msgstr "OpenStack Networking" - -msgid "" -"OpenStack currently meets such availability requirements for its own " -"infrastructure services, meaning that an uptime of 99.99% is feasible for " -"the OpenStack infrastructure proper. However, OpenStack does not guarantee " -"99.99% availability for individual guest instances." -msgstr "" -"OpenStack 自体のインフラストラクチャーは、現在その可用性要件を満たせます。つ" -"まり、適切な OpenStack インフラストラクチャーの 99.99% の稼働率が実現可能で" -"す。しかしながら、OpenStack は個々のゲストインスタンスの可用性 99.99% を保証" -"できません。" - -msgid "" -"OpenStack does not require a significant amount of resources and the " -"following minimum requirements should support a proof-of-concept high " -"availability environment with core services and several instances:" -msgstr "" -"OpenStack は膨大なリソースを必要としません。以下の最小要件は、コアサービスと" -"いくつかのインスタンスを動かす検証 (POC) 環境には対応できることでしょう。" - -msgid "" -"OpenStack is a set of services exposed to the end users as HTTP(s) APIs. " -"Additionally, for your own internal usage, OpenStack requires an SQL " -"database server and AMQP broker. The physical servers, where all the " -"components are running, are called controllers. This modular OpenStack " -"architecture allows you to duplicate all the components and run them on " -"different controllers. 
By making all the components redundant, it is " -"possible to make OpenStack highly available." -msgstr "" -"OpenStack は、HTTP(s) API としてエンドユーザーに公開されるサービス群です。さ" -"らに、その内部利用のために、OpenStack は SQL データベースサーバーと AMQP ブ" -"ローカーを必要とします。すべてのコンポーネントが動作している、物理サーバーは" -"よくコントローラーと呼ばれます。このモジュール型の OpenStack アーキテクチャー" -"により、すべてのコンポーネントを複製して、それらを別々のコントローラーで実行" -"できます。すべてのコンポーネントを冗長にすることにより、OpenStack の高可用性" -"を実現できます。" - -msgid "OpenStack network nodes contain:" -msgstr "OpenStack ネットワークノードでは、以下のものが動作します。" - -msgid "" -"OpenStack services are configured with the list of these IP addresses so " -"they can select one of the addresses from those available." -msgstr "" -"OpenStack サービスは、利用できるものから 1 つを選択できるよう、これらの IP ア" -"ドレスの一覧を用いて設定されます。" - -msgid "" -"OpenStack supports a single-controller high availability mode that is " -"managed by the services that manage highly available environments but is not " -"actually highly available because no redundant controllers are configured to " -"use for failover. This environment can be used for study and demonstration " -"but is not appropriate for a production environment." -msgstr "" -"OpenStack は、シングルコントローラーの高可用性モードをサポートします。これ" -"は、高可用性環境を管理するソフトウェアにより、サービスが管理されますが、コン" -"トローラーがフェイルオーバーのために冗長化設定されていないため、実際には高可" -"用性ではありません。この環境は、学習やデモのために使用できますが、本番環境と" -"しては適していません。" - -msgid "Overview of highly available controllers" -msgstr "高可用性コントローラーの概要" - -msgid "Pacemaker cluster stack" -msgstr "Pacemaker クラスタースタック" - -msgid "" -"Pacemaker does not inherently understand the applications it manages. " -"Instead, it relies on resource agents (RAs) that are scripts that " -"encapsulate the knowledge of how to start, stop, and check the health of " -"each application managed by the cluster." -msgstr "" -"Pacemaker は、管理するアプリケーションを本質的に理解してません。代わりに、リ" -"ソースエージェント (RA) に依存します。これは、クラスターにより管理される各ア" -"プリケーションの起動、停止、ヘルスチェック方法に関する知識を隠蔽するスクリプ" -"トです。" - -msgid "" -"Pacemaker now starts the OpenStack Identity service and its dependent " -"resources on all of your nodes." -msgstr "" -"Pacemaker は OpenStack Identity API サービスおよび依存するリソースをすべての" -"ノードに起動します。" - -msgid "" -"Pacemaker now starts the Shared File Systems API service and its dependent " -"resources on one of your nodes." -msgstr "" -"Pacemaker は Shared File Systems API サービスおよび依存するリソースを同じノー" -"ドに起動します。" - -msgid "" -"Pacemaker ships with a large set of OCF agents (such as those managing MySQL " -"databases, virtual IP addresses, and RabbitMQ), but can also use any agents " -"already installed on your system and can be extended with your own (see the " -"`developer guide `_)." -msgstr "" -"Pacemaker は、(MySQL データベース、仮想 IP アドレス、RabbitMQ などの) OCF " -"エージェントをたくさん同梱していますが、お使いのシステムにインストールした任" -"意のエージェントも使用できます。また、自身で拡張することもできます " -"(`developer guide `_ 参照)。" - -msgid "" -"Pacemaker then starts the OpenStack Image API service and its dependent " -"resources on one of your nodes." -msgstr "" -"Pacemaker は OpenStack Image API サービスおよび依存するリソースを同じノードに" -"起動します。" - -msgid "" -"Pacemaker uses an event-driven approach to cluster state processing. The " -"``cluster-recheck-interval`` parameter (which defaults to 15 minutes) " -"defines the interval at which certain Pacemaker actions occur. It is usually " -"prudent to reduce this to a shorter interval, such as 5 or 3 minutes." 
-msgstr "" -"Pacemaker は、クラスターの状態を処理するために、イベントドリブンのアプローチ" -"を使用します。 ``cluster-recheck-interval`` パラメーター (デフォルトは 15 " -"分) が、ある Pacemaker のアクションが発生する間隔を定義します。通常、5 分や " -"3 分など、より短い間隔に減らすことは慎重になるべきです。" - -msgid "Parameter" -msgstr "パラメーター" - -msgid "" -"Persistent block storage can survive instance termination and can also be " -"moved across instances like any external storage device. Cinder also has " -"volume snapshots capability for backing up the volumes." -msgstr "" -"永続ブロックストレージは、インスタンス終了後に残存して、任意の外部ストレージ" -"デバイスのようにインスタンスを越えて移動できます。Cinder は、ボリュームをバッ" -"クアップするために、ボリュームスナップショット機能も持ちます。" - -msgid "" -"Persistent storage exists outside all instances. Two types of persistent " -"storage are provided:" -msgstr "" -"永続ストレージは、すべてのインスタンスの外部にあります。2 種類の永続ストレー" -"ジが提供されます。" - -msgid "Possible options are:" -msgstr "利用できるオプションは次のとおりです。" - -msgid "Prerequisites" -msgstr "前提条件" - -msgid "Processor Cores" -msgstr "プロセッサーのコア" - -msgid "" -"Production servers should run (at least) three RabbitMQ servers for testing " -"and demonstration purposes, however it is possible to run only two servers. " -"In this section, we configure two nodes, called ``rabbit1`` and ``rabbit2``. " -"To build a broker, ensure that all nodes have the same Erlang cookie file." -msgstr "" -"本番サーバーは、(少なくとも) 3 つの RabbitMQ サーバーを実行すべきです。しかし" -"ながらテストやデモの目的の場合、サーバーを 2 つだけ実行することもできます。こ" -"のセクションでは、``rabbit1`` と ``rabbit2`` という 2 つのノードを設定しま" -"す。ブローカーを構築するために、すべてのノードがきちんと同じ Erlang クッキー" -"ファイルを持ちます。" - -msgid "Proxy server" -msgstr "プロキシーサーバー" - -msgid "Query the quorum status" -msgstr "クォーラム状態を問い合わせます" - -msgid "" -"Quorum becomes important when a failure causes the cluster to split in two " -"or more partitions. In this situation, you want the majority members of the " -"system to ensure the minority are truly dead (through fencing) and continue " -"to host resources. For a two-node cluster, no side has the majority and you " -"can end up in a situation where both sides fence each other, or both sides " -"are running the same services. This can lead to data corruption." -msgstr "" -"障害がクラスターを 2 つ以上のパーティションに分割した場合、クォーラムは重要に" -"なります。この状況では、システムの多数派のメンバーが、少数派を確実に (フェン" -"ス経由で) 停止させ、ホストリソースを継続することを確実にしたいでしょう。2 " -"ノードクラスターの場合、多数派になる側がなく、両方がお互いをフェンスする状" -"況、または両方が同じサービスを実行する状況になる可能性があります。これはデー" -"タ破損を引き起こします。" - -msgid "RAID drives" -msgstr "RAID ドライブ" - -msgid "RabbitMQ" -msgstr "RabbitMQ" - -msgid "" -"RabbitMQ nodes fail over on the application and the infrastructure layers." -msgstr "" -"RabbitMQ ノードは、アプリケーションとインフラ層の両方においてフェイルオーバー" -"します。" - -msgid "Receive notifications of quorum state changes" -msgstr "クォーラムの状態変更の通知を受け付けます" - -msgid "Recommended for testing." -msgstr "テスト向けの推奨。" - -msgid "Recommended solution by the Tooz project." -msgstr "Tooz プロジェクトによる推奨ソリューション。" - -msgid "Red Hat" -msgstr "Red Hat" - -msgid "Redundancy and failover" -msgstr "冗長性とフェールオーバー" - -msgid "" -"Regardless of which flavor you choose, we recommend that clusters contain at " -"least three nodes so that you can take advantage of `quorum `_." -msgstr "" -"選択するフレーバーに関わらず、クラスターは `quorum `_ の利点を得るた" -"めに、少なくとも 3 ノードを持つことを推奨します。" - -msgid "" -"Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage " -"database." -msgstr "" -"``CINDER_DBPASS`` を Block Storage データベース用に選択したパスワードで置き換" -"えます。" - -msgid "" -"Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage " -"database. Replace ``CINDER_PASS`` with the password you chose for the " -"``cinder`` user in the Identity service." 
-msgstr "" -"``CINDER_DBPASS`` を Block Storage サービス用に選択したパスワードで置き換えま" -"す。``CINDER_PASS`` を Identity サービスで ``cinder`` ユーザー用に選択したパ" -"スワードで置き換えます。" - -msgid "" -"Replace the IP addresses given here with comma-separated list of each " -"OpenStack database in your cluster." -msgstr "" -"ここで指定された IP アドレスを、お使いのクラスターにある OpenStack の各データ" -"ベースのコンマ区切りリストに置き換えます。" - -msgid "" -"Restart AppArmor. For servers that use ``init``, run the following command:" -msgstr "" -"AppArmor を再起動します。``init`` を使用するサーバーの場合、以下のコマンドを" -"実行します。" - -msgid "Restart the HAProxy service." -msgstr "HAProxy サービスを再起動します。" - -msgid "Restart the host or, to make changes work immediately, invoke:" -msgstr "" -"すぐに変更を反映するため、ホストを再起動します。または、以下を実行します。" - -msgid "Restarting the cluster" -msgstr "クラスターの再起動" - -msgid "Retry connecting with RabbitMQ:" -msgstr "RabbitMQ の接続を再試行します。" - -msgid "Run Networking DHCP agent" -msgstr "Networking DHCP エージェントの実行" - -msgid "Run Networking L3 agent" -msgstr "Networking L3 エージェントの実行" - -msgid "Run the following commands on each node except the first one:" -msgstr "1 番目のノード以外の各ノードで以下のコマンドを実行します。" - -msgid "" -"Run the following commands to download the OpenStack Identity resource to " -"Pacemaker:" -msgstr "" -"以下のコマンドを実行して、OpenStack Identity のリソースを Pacemaker にダウン" -"ロードします。" - -msgid "SELinux" -msgstr "SELinux" - -msgid "SELinux and AppArmor set to permit access to ``mysqld``" -msgstr "``mysqld`` へのアクセス許可を設定した SELinux や AppArmor" - -msgid "SUSE" -msgstr "SUSE" - -msgid "" -"SUSE Enterprise Linux and SUSE-based distributions, such as openSUSE, use a " -"set of OCF agents for controlling OpenStack services." -msgstr "" -"SUSE Enterprise Linux、openSUSE などの SUSE 系ディストリビューションは、" -"OpenStack のサービスを制御するために OCF エージェント群を使用します。" - -msgid "" -"Security-Enhanced Linux is a kernel module for improving security on Linux " -"operating systems. It is commonly enabled and configured by default on Red " -"Hat-based distributions. In the context of Galera Cluster, systems with " -"SELinux may block the database service, keep it from starting, or prevent it " -"from establishing network connections with the cluster." -msgstr "" -"Security-Enhanced Linux は、Linux オペレーティングシステムにおいてセキュリ" -"ティーを向上させるためのカーネルモジュールです。Red Hat 系のディストリビュー" -"ションでは、一般的にデフォルトで有効化され、設定されています。Galera Cluster " -"の観点では、SELinux を有効化したシステムは、データベースサービスをブロックす" -"るかもしれません。また、クラスターを起動しても、ネットワーク接続を確立できな" -"いかもしれません。" - -msgid "Segregated" -msgstr "Segregated" - -msgid "" -"Services like RabbitMQ and Galera have complicated boot-up sequences that " -"require co-ordination, and often serialization, of startup operations across " -"all machines in the cluster. This is especially true after a site-wide " -"failure or shutdown where you must first determine the last machine to be " -"active." 
-msgstr "" -"RabbitMQ や Galera などのサービスは、複雑な起動順番を持ちます。クラスター内の" -"全マシンに渡り、起動処理の協調動作を必要とし、しばしば順番に実行する必要があ" -"ります。とくに、サイト全体の障害後、最後にアクティブにするマシンを判断する必" -"要のあるシャットダウンのときに当てはまります。" - -msgid "Set a password for hacluster user on each host:" -msgstr "各ホストにおいて hacluster ユーザーのパスワードを設定します。" - -msgid "Set automatic L3 agent failover for routers" -msgstr "ルーター向け L3 エージェントの自動フェイルオーバーの設定" - -msgid "Set basic cluster properties" -msgstr "基本的なクラスターのプロパティの設定" - -msgid "Set up Corosync with multicast" -msgstr "マルチキャストを使う場合の Corosync の設定" - -msgid "Set up Corosync with unicast" -msgstr "ユニキャストを使う場合の Corosync の設定" - -msgid "Set up Corosync with votequorum library" -msgstr "votequorum ライブラリーを使う場合の Corosync の設定" - -msgid "Set up the cluster with `crmsh`" -msgstr "`crmsh` を用いたクラスターのセットアップ" - -msgid "Set up the cluster with pcs" -msgstr "pcs を用いたセットアップ" - -msgid "" -"Setting ``last_man_standing`` to 1 enables the Last Man Standing (LMS) " -"feature. By default, it is disabled (set to 0). If a cluster is on the " -"quorum edge (``expected_votes:`` set to 7; ``online nodes:`` set to 4) for " -"longer than the time specified for the ``last_man_standing_window`` " -"parameter, the cluster can recalculate quorum and continue operating even if " -"the next node will be lost. This logic is repeated until the number of " -"online nodes in the cluster reaches 2. In order to allow the cluster to step " -"down from 2 members to only 1, the ``auto_tie_breaker`` parameter needs to " -"be set. We do not recommended this for production environments." -msgstr "" -"``last_man_standing`` を 1 に設定することにより、Last Man Standing (LMS) 機能" -"を有効化できます。デフォルトで、無効化されています (0 に設定)。クラスターが、" -"``last_man_standing_window`` パラメーターに指定した時間より長く、クォーラム" -"エッジ (``expected_votes:`` が 7 に設定、 ``online nodes:`` が 4 に設定) にあ" -"る場合、クラスターはクォーラムを再計算して、次のノードが失われても動作を継続" -"します。この論理は、クラスターのオンラインノードが 2 になるまで繰り返されま" -"す。クラスターが 2 つのメンバーから 1 つだけに減ることを許可するために、 " -"``auto_tie_breaker`` パラメーターを設定する必要があります。これは本番環境では" -"推奨されません。" - -msgid "" -"Setting the ``pe-warn-series-max``, ``pe-input-series-max``, and ``pe-error-" -"series-max`` parameters to 1000 instructs Pacemaker to keep a longer history " -"of the inputs processed and errors and warnings generated by its Policy " -"Engine. This history is useful if you need to troubleshoot the cluster." -msgstr "" -"パラメーター ``pe-warn-series-max``, ``pe-input-series-max``, ``pe-error-" -"series-max`` を 1000 に設定することにより、Pacemaker が処理した入力履歴、ポリ" -"シーエンジンにより生成されたログと警告を保持するよう指定できます。この履歴" -"は、クラスターのトラブルシューティングを必要とする場合に役立ちます。" - -msgid "Simplified process for adding/removing of nodes" -msgstr "ノードの追加と削除を簡単化したプロセス" - -msgid "" -"Since all API access is directed to the proxy, adding or removing nodes has " -"no impact on the configuration of other services. This can be very useful in " -"upgrade scenarios where an entirely new set of machines can be configured " -"and tested in isolation before telling the proxy to direct traffic there " -"instead." -msgstr "" -"すべての API アクセスがプロキシーに向けられているので、ノードの追加や削除は、" -"他のサービスの設定に影響を与えません。これにより、プロキシーが新しいマシン群" -"に通信を向ける前に、それらを独立した環境において設定してテストする、アップグ" -"レードシナリオにおいて非常に役立ちます。" - -msgid "" -"Since the cluster is a single administrative domain, it is acceptable to use " -"the same password on all nodes." -msgstr "" -"クラスターは単一の管理ドメインなので、一般的にすべてのノードで同じパスワード" -"を使用できます。" - -msgid "Single-controller high availability mode" -msgstr "シングルコントローラーの高可用性モード" - -msgid "" -"Specifying ``corosync_votequorum`` enables the votequorum library. This is " -"the only required option." 
-msgstr "" -"``corosync_votequorum`` を指定することにより、votequorum ライブラリーを有効化" -"します。これは唯一の必須オプションです。" - -msgid "Start Corosync" -msgstr "Corosync の開始" - -msgid "Start Pacemaker" -msgstr "Pacemaker の開始" - -msgid "Start ``corosync`` with systemd unit file:" -msgstr "systemd ユニットファイルを用いた ``corosync`` の起動:" - -msgid "Start ``corosync`` with the LSB init script:" -msgstr "LSBinit スクリプトを用いた ``corosync`` の起動:" - -msgid "Start ``corosync`` with upstart:" -msgstr "upstart を用いた ``corosync`` の起動:" - -msgid "Start ``pacemaker`` with the LSB init script:" -msgstr "LSBinit スクリプトを用いた ``pacemaker`` の起動:" - -msgid "Start ``pacemaker`` with the systemd unit file:" -msgstr "systemd ユニットファイルを用いた ``pacemaker`` の起動:" - -msgid "Start ``pacemaker`` with upstart:" -msgstr "upstart を用いた ``pacemaker`` の起動:" - -msgid "" -"Start the ``xinetd`` daemon for ``clustercheck``. For servers that use " -"``init``, run the following commands:" -msgstr "" -"``clustercheck`` の ``xinetd`` デーモンを起動します。 ``init`` を使用するサー" -"バーの場合、以下のコマンドを実行します。" - -msgid "" -"Start the database server on all other cluster nodes. For servers that use " -"``init``, run the following command:" -msgstr "" -"すべての他のクラスターノードにおいてデータベースサーバーを起動します。" -"``init`` を使用するサーバーに対して、以下のコマンドを実行します。" - -msgid "" -"Start the message queue service on all nodes and configure it to start when " -"the system boots. On Ubuntu, it is configured by default." -msgstr "" -"すべてのノードにおいてメッセージキューサービスを起動し、システム起動時に起動" -"するよう設定します。Ubuntu の場合、デフォルトで設定されます。" - -msgid "Stateful service" -msgstr "ステートフルサービス" - -msgid "" -"Stateful services can be configured as active/passive or active/active, " -"which are defined as follows:" -msgstr "" -"ステートフルサービスは、アクティブ/パッシブまたはアクティブ/アクティブとして" -"設定できます。これらは以下のように定義されます。" - -msgid "Stateless service" -msgstr "ステートレスサービス" - -msgid "Stateless versus stateful services" -msgstr "ステートレスサービスとステートフルサービス" - -msgid "" -"Stop RabbitMQ and copy the cookie from the first node to each of the other " -"node(s):" -msgstr "" -"RabbitMQ を停止して、1 番目のノードから他のノードにクッキーをコピーします。" - -msgid "Storage" -msgstr "ストレージ" - -msgid "Storage back end" -msgstr "ストレージバックエンド" - -msgid "Storage components" -msgstr "ストレージ構成要素" - -msgid "" -"System downtime: Occurs when a user-facing service is unavailable beyond a " -"specified maximum amount of time." -msgstr "" -"システム停止時間: 指定された最大時間を超えて、ユーザーサービスが利用不可能に" -"なること。" - -msgid "Telemetry" -msgstr "Telemetry" - -msgid "Telemetry polling agent" -msgstr "Telemetry ポーリングエージェント" - -msgid "" -"The :command:`crm configure` command supports batch input. Copy and paste " -"the lines in the next step into your live Pacemaker configuration and then " -"make changes as required." -msgstr "" -":command:`crm configure` はバッチ入力をサポートします。そのため、現在の " -"pacemaker 設定の中に上の行をコピー・ペーストし、適宜変更を反映できます。" - -msgid "" -"The :command:`crm configure` supports batch input. Copy and paste the lines " -"in the next step into your live Pacemaker configuration and then make " -"changes as required." -msgstr "" -":command:`crm configure` はバッチ入力をサポートします。そのため、現在の " -"pacemaker 設定の中に上の行をコピー・ペーストし、適宜変更を反映できます。" - -msgid "" -"The :command:`crm configure` supports batch input. You may have to copy and " -"paste the above lines into your live Pacemaker configuration, and then make " -"changes as required." -msgstr "" -":command:`crm configure` はバッチ入力をサポートします。そのため、現在の " -"Pacemaker 設定の中に上をコピー・ペーストし、適宜変更を反映できます。" - -msgid "" -"The Block Storage service (cinder) that can use LVM or Ceph RBD as the " -"storage back end." 
-msgstr "" -"ストレージバックエンドとして LVM や Ceph RBD を使用できる Block Storage サー" -"ビス (cinder)。" - -msgid "" -"The Galera cluster configuration directive ``backup`` indicates that two of " -"the three controllers are standby nodes. This ensures that only one node " -"services write requests because OpenStack support for multi-node writes is " -"not yet production-ready." -msgstr "" -"この Galera cluster の設定ディレクティブ ``backup`` は、3 つのコントローラー" -"の内 2 つがスタンバイノードであることを意味します。" - -msgid "" -"The Image service (glance) that can use the Object Storage service (swift) " -"or Ceph RBD as the storage back end." -msgstr "" -"ストレージバックエンドとして Object Storage サービス (swift) や Ceph RBD を使" -"用できる Image サービス (glance)。" - -msgid "" -"The L2 agent cannot be distributed and highly available. Instead, it must be " -"installed on each data forwarding node to control the virtual network driver " -"such as Open vSwitch or Linux Bridge. One L2 agent runs per node and " -"controls its virtual interfaces." -msgstr "" -"分散させることはできず、高可用構成にはできません。その代わり、 L2 エージェン" -"トを各データ転送ノードにインストールして、 Open vSwitch や Linux ブリッジなど" -"の仮想ネットワークドライバーを制御します。ノードあたり 1 つの L2 エージェント" -"が動作し、そのノードの仮想インターフェースの制御を行います。" - -msgid "" -"The Memcached client implements hashing to balance objects among the " -"instances. Failure of an instance impacts only a percentage of the objects " -"and the client automatically removes it from the list of instances. The SLA " -"is several minutes." -msgstr "" -"Memcached クライアントは、インスタンス間でオブジェクトを分散するハッシュ機能" -"を持ちます。インスタンスの障害は、オブジェクトの使用率のみに影響します。クラ" -"イアントは、インスタンスの一覧から自動的に削除されます。SLA は数分です。" - -msgid "" -"The Memcached client implements hashing to balance objects among the " -"instances. Failure of an instance only impacts a percentage of the objects, " -"and the client automatically removes it from the list of instances." -msgstr "" -"Memcached クライアントは、インスタンス間でオブジェクトを分散するハッシュ機能" -"を持ちます。インスタンスの障害は、オブジェクトの使用率のみに影響します。クラ" -"イアントは、インスタンスの一覧から自動的に削除されます。" - -msgid "" -"The Networking (neutron) service L3 agent is scalable, due to the scheduler " -"that supports Virtual Router Redundancy Protocol (VRRP) to distribute " -"virtual routers across multiple nodes. For more information about the VRRP " -"and keepalived, see `Linux bridge: High availability using VRRP `_ and " -"`Open vSwitch: High availability using VRRP `_." -msgstr "" -"Networking (neutron) サービス L3 エージェントは、スケーラブルです。複数のノー" -"ドにわたり仮想ルーターを分散するために、スケジューラーが Virtual Router " -"Redundancy Protocol (VRRP) をサポートするためです。設定済みのルーターを高可用" -"化するために、 :file:`/etc/neutron/neutron.conf` ファイルを編集して、以下の値" -"を設定します。VRRP と keepalived の詳細は、`Linux bridge: High availability " -"using VRRP `_ および `Open vSwitch: High availability using VRRP `_ を参照" -"してください。" - -msgid "" -"The OpenStack Image service offers a service for discovering, registering, " -"and retrieving virtual machine images. To make the OpenStack Image API " -"service highly available in active/passive mode, you must:" -msgstr "" -"OpenStack Image サービスは、仮想マシンイメージを検索、登録、取得するための" -"サービスを提供します。OpenStack Image API サービスをアクティブ/パッシブモード" -"で高可用性にするために、以下が必要になります。" - -msgid "" -"The OpenStack Installation Guides also include a list of the services that " -"use passwords with important notes about using them." -msgstr "" -"OpenStack インストールガイドは、パスワードを使用するサービスの一覧、それらを" -"使用する上の重要な注意点もまとめてあります。" - -msgid "" -"The OpenStack Networking (neutron) service has a scheduler that lets you run " -"multiple agents across nodes. The DHCP agent can be natively highly " -"available." 
-msgstr "" -"OpenStack Networking (neutron) サービスには、ノードにまたがって複数のエージェ" -"ントを実行できるスケジューラーがあります。" - -msgid "The Pacemaker architecture" -msgstr "Pacemaker アーキテクチャー" - -msgid "" -"The Pacemaker service also requires an additional configuration file ``/etc/" -"corosync/uidgid.d/pacemaker`` to be created with the following content:" -msgstr "" -"Pacemaker サービスは、以下の内容で作成された、追加の設定ファイル ``/etc/" -"corosync/uidgid.d/pacemaker`` も必要とします。" - -msgid "" -"The SQL relational database server provides stateful type consumed by other " -"components. Supported databases are MySQL, MariaDB, and PostgreSQL. Making " -"the SQL database redundant is complex." -msgstr "" -"SQL リレーショナルデータベースサーバーは、他のコンポーネントにより利用される" -"ステートフルな状態を提供します。サポートされるデータベースは、MySQL、" -"MariaDB、PostgreSQL です。SQL データベースを冗長化することは複雑です。" - -msgid "" -"The Telemetry API service configuration does not have the ``option httpchk`` " -"directive as it cannot process this check properly." -msgstr "" -"Telemetry API サービスの設定は、このチェックを正常に実行できないため、 " -"``option httpchk`` ディレクティブがありません。" - -msgid "" -"The Telemetry polling agent can be configured to partition its polling " -"workload between multiple agents. This enables high availability (HA)." -msgstr "" -"Telemetry ポーリングエージェントは、複数のエージェント間でポーリングする負荷" -"を分割するよう設定できます。これにより、高可用性 (HA) を有効化できます。" - -msgid "" -"The `Telemetry service `_ provides a data collection service and an alarming " -"service." -msgstr "" -"`Telemetry サービス `_ は、データ収集サービスとアラームサービスを提供しま" -"す。" - -msgid "" -"The ``-p`` option is used to give the password on command line and makes it " -"easier to script." -msgstr "" -"``-p`` オプションは、コマンドラインにおいてパスワードを指定して、スクリプト化" -"しやすくするために使用されます。" - -msgid "" -"The ``admin_bind_host`` parameter lets you use a private network for admin " -"access." -msgstr "" -"``admin_bind_host`` パラメーターにより、管理アクセスのためのプライベートネッ" -"トワークを使用できます。" - -msgid "" -"The ``bindnetaddr`` is the network address of the interfaces to bind to. The " -"example uses two network addresses of /24 IPv4 subnets." -msgstr "" -"``bindnetaddr`` は、バインドするインターフェースのネットワークアドレスです。" -"この例は、2 つの /24 IPv4 サブネットを使用します。" - -msgid "" -"The ``token`` value specifies the time, in milliseconds, during which the " -"Corosync token is expected to be transmitted around the ring. When this " -"timeout expires, the token is declared lost, and after " -"``token_retransmits_before_loss_const lost`` tokens, the non-responding " -"processor (cluster node) is declared dead. ``token × " -"token_retransmits_before_loss_const`` is the maximum time a node is allowed " -"to not respond to cluster messages before being considered dead. The default " -"for token is 1000 milliseconds (1 second), with 4 allowed retransmits. These " -"defaults are intended to minimize failover times, but can cause frequent " -"false alarms and unintended failovers in case of short network " -"interruptions. The values used here are safer, albeit with slightly extended " -"failover times." -msgstr "" -"``token`` の値は、Corosync トークンがリング内を転送されることが予想される時間" -"をミリ秒単位で指定します。このタイムアウトを過ぎると、トークンが失われます。 " -"``token_retransmits_before_loss_const lost`` トークンの後、応答しないプロセッ" -"サー (クラスターノード) が停止していると宣言されます。 ``token × " -"token_retransmits_before_loss_const`` は、ノードが停止とみなされるまでに、ク" -"ラスターメッセージに応答しないことが許される最大時間です。トークン向けのデ" -"フォルトは、1000 ミリ秒 (1 秒)、4 回の再送許可です。これらのデフォルト値は、" -"フェイルオーバー時間を最小化することを意図していますが、頻繁な誤検知と短い" -"ネットワーク中断による意図しないフェイルオーバーを引き起こす可能性がありま" -"す。ここで使用される値は、フェイルオーバー時間がわずかに長くなりますが、より" -"安全です。" - -msgid "" -"The ``transport`` directive controls the transport mechanism. 
To avoid the " -"use of multicast entirely, specify the ``udpu`` unicast transport parameter. " -"This requires specifying the list of members in the ``nodelist`` directive. " -"This potentially makes up the membership before deployment. The default is " -"``udp``. The transport type can also be set to ``udpu`` or ``iba``." -msgstr "" -"``transport`` ディレクティブは使用するトランスポートメカニズムを制御します。 " -"マルチキャストを完全に無効にするためには、``udpu`` ユニキャストトランスポート" -"パラメーターを指定します。``nodelist`` ディレクティブにメンバー一覧を指定する" -"必要があります。展開する前にメンバーシップを構成することができます。デフォル" -"トは ``udp`` です。トランスポート形式は ``udpu`` や ``iba`` に設定することも" -"できます。" - -msgid "" -"The application layer is controlled by the ``oslo.messaging`` configuration " -"options for multiple AMQP hosts. If the AMQP node fails, the application " -"reconnects to the next one configured within the specified reconnect " -"interval. The specified reconnect interval constitutes its SLA." -msgstr "" -"アプリケーション層は、複数 AMQP ホスト向けの ``oslo.messaging`` 設定オプショ" -"ンにより制御されます。AMQP ノードが故障したとき、アプリケーションが、指定され" -"た再接続間隔で、設定された次のノードに再接続します。" - -msgid "" -"The architectural challenges of instance HA and several currently existing " -"solutions were presented in `a talk at the Austin summit `_, for which `slides are also available `_." -msgstr "" -"インスタンス HA のアーキテクチャー的な考慮事項と既存のソリューションは、" -"`Austin summit の講演 `_ にあります。ま" -"た `スライド `_ も参照できます。" - -msgid "" -"The architectures differ in the sets of services managed by the cluster." -msgstr "" -"アーキテクチャーは、クラスターにより管理されるサービス群により異なります。" - -msgid "" -"The availability check of the instances is provided by heartbeat messages. " -"When the connection with an instance is lost, the workload will be " -"reassigned within the remaining instances in the next polling cycle." -msgstr "" -"インスタンスの死活監視は、ハートビートメッセージによって提供されます。インス" -"タンスとの接続が失われた時、次のポーリングサイクルにて、ワークロードは、残っ" -"たインスタンスの中で再割り当てが行われます。" - -msgid "" -"The benefits of this approach are the physical isolation between components " -"and the ability to add capacity to specific components." -msgstr "" -"この方法の利点は、コンポーネント間の物理的な隔離、特定のコンポーネントへの" -"キャパシティーの追加です。" - -msgid "" -"The cloud controller runs on the management network and must talk to all " -"other services." -msgstr "" -"クラウドコントローラーは、管理ネットワークで動作し、他のすべてのサービスと通" -"信できる必要があります。" - -msgid "" -"The cluster is fully operational with ``expected_votes`` set to 7 nodes " -"(each node has 1 vote), quorum: 4. If a list of nodes is specified as " -"``nodelist``, the ``expected_votes`` value is ignored." -msgstr "" -"このクラスターは、7 ノード (各ノードが 1 つの投票権を持つ)、クォーラム 4 つに" -"設定した ``expected_votes`` で完全に動作します。ノードの一覧は ``nodelist`` " -"に指定された場合、 ``expected_votes`` の値は無視されます。" - -msgid "" -"The code for three of these solutions can be found online at the following " -"links:" -msgstr "" -"これら 3 つのソリューションのコードは、以下のリンクからオンライン参照できま" -"す。" - -msgid "" -"The command :command:`crm configure` supports batch input, copy and paste " -"the lines above into your live Pacemaker configuration and then make changes " -"as required. For example, you may enter ``edit p_ip_cinder-api`` from the :" -"command:`crm configure` menu and edit the resource to match your preferred " -"virtual IP address." -msgstr "" -":command:`crm configure` コマンドはバッチ入力をサポートします。現在の " -"Pacemaker 設定の中に上の行をコピー・ペーストし、適宜変更を反映できます。例え" -"ば、お好みの仮想 IP アドレスに一致させるために、:command:`crm configure` メ" -"ニューから ``edit p_ip_cinder-api`` と入力し、リソースを編集できます。" - -msgid "" -"The commands for installing RabbitMQ are specific to the Linux distribution " -"you are using." 
-msgstr "" -"RabbitMQ のインストールコマンドは、使用している Linux ディストリビューション" -"により異なります。" - -msgid "" -"The correct path to ``libgalera_smm.so`` given to the ``wsrep_provider`` " -"parameter" -msgstr "" -"``wsrep_provider`` パラメーターに指定された ``libgalera_smm.so`` への適切なパ" -"ス" - -msgid "" -"The first step is to install the database that sits at the heart of the " -"cluster. To implement high availability, run an instance of the database on " -"each controller node and use Galera Cluster to provide replication between " -"them. Galera Cluster is a synchronous multi-master database cluster, based " -"on MySQL and the InnoDB storage engine. It is a high-availability service " -"that provides high system uptime, no data loss, and scalability for growth." -msgstr "" -"最初の手順は、クラスターの中心になるデータベースをインストールすることです。" -"高可用性を実現するために、各コントローラーノードにおいてデータベースを実行" -"し、ノード間でレプリケーションできる Galera Cluster を使用します。Galera " -"Cluster は、MySQL と InnoDB ストレージエンジンをベースにした、同期型のマルチ" -"マスターデータベースクラスターです。高いシステム稼働時間、データ損失なし、ス" -"ケーラビリティーを提供する、高可用性サービスです。" - -msgid "The following are the definitions of stateless and stateful services:" -msgstr "以下は、ステートレスサービスとステートフルサービスの定義です。" - -msgid "The following are the standard hardware requirements:" -msgstr "標準ハードウェア要件:" - -msgid "" -"The following components are currently unable to benefit from the use of a " -"proxy server:" -msgstr "" -"以下のコンポーネントは、現在、プロキシサーバーの利用による利点はありません。" - -msgid "The following components/services can work with HA queues:" -msgstr "以下のコンポーネントやサービスは、HA キューを用いて動作できます。" - -msgid "" -"The following section(s) detail how to add the OpenStack Identity resource " -"to Pacemaker on SUSE and Red Hat." -msgstr "" -"以下のセクションは、SUSE と Red Hat において OpenStack Identity のリソースを " -"Pacemaker にダウンロードする方法を記載します。" - -msgid "" -"The majority of services, needing no real orchestration, are handled by " -"systemd on each node. This approach avoids the need to coordinate service " -"upgrades or location changes with the cluster and has the added advantage of " -"more easily scaling beyond Corosync's 16 node limit. However, it will " -"generally require the addition of an enterprise monitoring solution such as " -"Nagios or Sensu for those wanting centralized failure reporting." -msgstr "" -"実際のオーケストレーションを必要としない、大多数のサービスは各ノードにおいて " -"systemd により処理されます。このアプローチは、クラスターでサービスのアップグ" -"レードや位置の変更を調整する必要性を避けます。また、Corosync の 16 ノード制限" -"を超えて簡単にスケールするという利点を得られます。しかしながら一般的に、障害" -"レポートを集約するために、Nagios や Sensu のようなエンタープライズモニタリン" -"グソリューションを追加する必要があります。" - -msgid "" -"The most popular AMQP implementation used in OpenStack installations is " -"RabbitMQ." -msgstr "" -"OpenStack 環境に使用される最も一般的な AMQP ソフトウェアは RabbitMQ です。" - -msgid "" -"The proxy can be configured as a secondary mechanism for detecting service " -"failures. It can even be configured to look for nodes in a degraded state " -"(such as being too far behind in the replication) and take them out of " -"circulation." -msgstr "" -"プロキシーは、サービスの障害を検知するための 2 番目の機構として設定できます。" -"(長く複製から外れているなど) デグレード状態にあるノードを探して、それらを除外" -"するよう設定することもできます。" - -msgid "" -"The quorum specifies the minimal number of nodes that must be functional in " -"a cluster of redundant nodes in order for the cluster to remain functional. " -"When one node fails and failover transfers control to other nodes, the " -"system must ensure that data and processes remain sane. To determine this, " -"the contents of the remaining nodes are compared and, if there are " -"discrepancies, a majority rules algorithm is implemented." 
-msgstr "" -"クォーラムは、クラスターが機能し続けるために、冗長化されたノードのクラスター" -"において機能し続ける必要がある最小ノード数を指定します。あるノードが停止し" -"て、他のノードに制御がフェールオーバーするとき、システムはデータとプロセスが" -"維持されることを保証する必要があります。これを判断するために、残りのノードの" -"内容が比較される必要があります。また、不整合がある場合、多数決論理が実装され" -"ます。" - -msgid "" -"The service declaration for the Pacemaker service may be placed in the :file:" -"`corosync.conf` file directly or in its own separate file, :file:`/etc/" -"corosync/service.d/pacemaker`." -msgstr "" -"Pacemaker サービスに関するサービス定義は、直接 :file:`corosync.conf` ファイル" -"にあるか、単独ファイル :file:`/etc/corosync/service.d/pacemaker` にある可能性" -"があります。" - -msgid "The steps to implement the Pacemaker cluster stack are:" -msgstr "Pacemaker クラスタースタックを実行する手順は、次のとおりです。" - -msgid "" -"The votequorum library has been created to replace and eliminate ``qdisk``, " -"the disk-based quorum daemon for CMAN, from advanced cluster configurations." -msgstr "" -"votequorum ライブラリーは、高度なクラスター設定により、 ``qdisk`` 、CMAN 向け" -"ディスクベースのクォーラムデーモンを置き換えて除去するために作成されます。" - -msgid "" -"The votequorum library is part of the Corosync project. It provides an " -"interface to the vote-based quorum service and it must be explicitly enabled " -"in the Corosync configuration file. The main role of votequorum library is " -"to avoid split-brain situations, but it also provides a mechanism to:" -msgstr "" -"votequorum ライブラリーは Corosync プロジェクトの一部です。投票ベースのクォー" -"ラムサービスへのインターフェースを提供し、Corosync 設定ファイルにおいて明示的" -"に有効化する必要があります。votequorum ライブラリーのおもな役割は、スプリット" -"ブレイン状態を避けるためですが、以下の機能も提供します。" - -msgid "" -"These agents must conform to one of the `OCF `_, `SysV Init " -"`_, Upstart, or Systemd standards." -msgstr "" -"これらのエージェントは、 `OCF `_, `SysV Init `_, Upstart, Systemd 標準に従う必要があります。" - -msgid "This can be achieved using the :command:`iptables` command:" -msgstr "これは :command:`iptables` コマンドを使用して実現できます。" - -msgid "" -"This chapter describes the basic environment for high availability, such as " -"hardware, operating system, common services." -msgstr "" -"この章は高可用性を実現するための基本的な環境、例えばハードウェアやオペレー" -"ションシステム、共通サービスについて説明します。" - -msgid "" -"This chapter describes the shared services for high availability, such as " -"database, messaging service." -msgstr "" -"この章では、データベース、メッセージングサービスといった共有サービスの高可用" -"性について説明します。" - -msgid "" -"This configuration creates ``p_cinder-api``, a resource for managing the " -"Block Storage API service." -msgstr "" -"この設定は Block Storage API サービスを管理するためのリソース ``p_cinder-" -"api`` を作成します。" - -msgid "" -"This configuration creates ``p_glance-api``, a resource for managing the " -"OpenStack Image API service." -msgstr "" -"この設定は ``p_glance-api`` を作成します。これは OpenStack Image API サービス" -"を管理するリソースです。" - -msgid "" -"This configuration creates ``p_keystone``, a resource for managing the " -"OpenStack Identity service." -msgstr "" -"この設定は OpenStack Identity サービスを管理するためのリソース " -"``p_keystone`` を作成します。" - -msgid "" -"This configuration creates ``p_manila-api``, a resource for managing the " -"Shared File Systems API service." -msgstr "" -"この設定は Shared File Systems API サービスを管理するためのリソース " -"``p_manila-api`` を作成します。" - -msgid "" -"This configuration creates ``vip``, a virtual IP address for use by the API " -"node (``10.0.0.11``)." -msgstr "" -"この設定は、API ノード (``10.0.0.11``) により使用される仮想 IP アドレス " -"``vip`` を作成します。" - -msgid "" -"This document discusses some common methods of implementing highly available " -"systems, with an emphasis on the core OpenStack services and other open " -"source services that are closely aligned with OpenStack." 
-msgstr "" -"このドキュメントは、高可用性システムを実行する方法をいくつか議論します。コア" -"な OpenStack サービス、OpenStack とかなり一緒に使われる他のオープンソースサー" -"ビスを強調しています。" - -msgid "" -"This example assumes that you are using NFS for the physical storage, which " -"will almost never be true in a production installation." -msgstr "" -"この例は、物理ストレージに NFS を使用していることを仮定します。これは、ほとん" -"どの本番環境のインストールにおいて正しくありません。" - -msgid "This guide uses the following example IP addresses:" -msgstr "このガイドは、以下の IP アドレス例を使用します。" - -msgid "This is the most common option and the one we document here." -msgstr "これは最も一般的なオプションで、ここにドキュメント化します。" - -msgid "" -"This is why setting the quorum to a value less than ``floor(n/2) + 1`` is " -"dangerous. However it may be required for some specific cases, such as a " -"temporary measure at a point it is known with 100% certainty that the other " -"nodes are down." -msgstr "" -"これがクォーラムの値を ``floor(n/2) + 1`` より小さく設定することが危険な理由" -"です。しかしながら、いくつかの特別な場合に必要となる可能性があります。例え" -"ば、他のノードが 100% 確実に停止していることがわかっている場合の一時的な計測" -"などです。" - -msgid "" -"This scenario can be visualized as below, where each box below represents a " -"cluster of three or more guests." -msgstr "" -"このシナリオは、以下のように可視化できます。以下の各ボックスは 3 つ以上のゲス" -"トのクラスターを表します。" - -msgid "This scenario can be visualized as below." -msgstr "このシナリオは以下のように可視化できます。" - -msgid "" -"This scenario has the advantage of requiring far fewer, if more powerful, " -"machines. Additionally, being part of a single cluster allows you to " -"accurately model the ordering dependencies between components." -msgstr "" -"このシナリオは、より高性能ならば、より少ないマシンを必要とする利点がありま" -"す。加えて、シングルクラスターの一部になることにより、コンポーネント間の順序" -"依存関係を正確にモデル化できます。" - -msgid "" -"This section discusses ways to protect against data loss in your OpenStack " -"environment." -msgstr "" -"このセクションは、お使いの OpenStack 環境におけるデータ損失から保護する方法を" -"議論します。" - -msgid "" -"This value increments with each transaction, so the most advanced node has " -"the highest sequence number and therefore is the most up to date." -msgstr "" -"この値は各トランザクションによりインクリメントされます。ほとんどの高度なノー" -"ドは、最大のシーケンス番号を持つため、ほとんど最新です。" - -msgid "" -"To be sure that all data is highly available, ensure that everything is " -"stored in the MySQL database (which is also highly available):" -msgstr "" -"すべてのものを (高可用性) MySQL データベースに保存して、すべてのデータが高可" -"用性になっていることを確認します。" - -msgid "" -"To configure AppArmor to work with Galera Cluster, complete the following " -"steps on each cluster node:" -msgstr "" -"各クラスターノードにおいて以下の手順を実行して、Galera Cluster を正常に動作さ" -"せるために AppArmor を設定します。" - -msgid "" -"To configure SELinux to permit Galera Cluster to operate, you may need to " -"use the ``semanage`` utility to open the ports it uses. For example:" -msgstr "" -"SELinux を設定して Galera Cluster の動作を許可するために、``semanage`` ユー" -"ティリティーを使用して、使用ポートを開く必要があるかもしれません。例:" - -msgid "" -"To configure the number of DHCP agents per network, modify the " -"``dhcp_agents_per_network`` parameter in the :file:`/etc/neutron/neutron." -"conf` file. By default this is set to 1. To achieve high availability, " -"assign more than one DHCP agent per network. For more information, see `High-" -"availability for DHCP `_." 
-msgstr "" -"ネットワークあたりの DHCP エージェント数を設定するには、 file:`/etc/neutron/" -"neutron.conf` ファイルの``dhcp_agents_per_network`` パラメーターを変更しま" -"す。このパラメーターのデフォルト値は 1 です。高可用性を持たせるには、ネット" -"ワークあたりの DHCP エージェント数を 1 以上にする必要があります。詳細は " -"`High-availability for DHCP `_ を参照してください。" - -msgid "" -"To enable high availability for configured routers, edit the :file:`/etc/" -"neutron/neutron.conf` file to set the following values:" -msgstr "" -"設定済みルーターを高可用性にするために、:file:`/etc/neutron/neutron.conf` " -"ファイルを編集し、以下の値を設定します。" - -msgid "" -"To enable the compute agent to run multiple instances simultaneously with " -"workload partitioning, the ``workload_partitioning`` option must be set to " -"``True`` under the `compute section `_ in the :file:`ceilometer.conf` configuration " -"file." -msgstr "" -"コンピュートエージェントがワークロード分割により同時に複数のインスタンスを実" -"行できるようにするために、``workload_partitioning`` オプションが :file:" -"`ceilometer.conf` 設定ファイルの `compute セクション `_ において ``True`` に設定する必要" -"があります。" - -msgid "" -"To ensure that all queues except those with auto-generated names are " -"mirrored across all running nodes, set the ``ha-mode`` policy key to all by " -"running the following command on one of the nodes:" -msgstr "" -"自動生成された名前を持つキューを除いて、すべてのキューがすべての動作中のノー" -"ドで確実にミラーするために、以下のコマンドをどこかのノードで実行して、 ``ha-" -"mode`` ポリシーキーを all に設定します。" - -msgid "" -"To find the most advanced cluster node, you need to check the sequence " -"numbers, or the ``seqnos``, on the last committed transaction for each. You " -"can find this by viewing ``grastate.dat`` file in database directory:" -msgstr "" -"最も高度なクラスターノードを見つけるために、各ノードの最新コミットのトランザ" -"クションにある ``seqnos`` を確認する必要があります。データベースディレクト" -"リーにある ``grastate.dat`` ファイルを表示すると、これを見つけられます。" - -msgid "" -"To install and configure Memcached, read the `official documentation " -"`_." -msgstr "" -"Memcached をインストールして設定する方法は、 `公式ドキュメント `_ を参照してください。" - -msgid "To start the cluster, complete the following steps:" -msgstr "以下の手順を実行して、クラスターを起動します。" - -msgid "" -"Traditionally, Pacemaker has been positioned as an all-encompassing " -"solution. However, as OpenStack services have matured, they are increasingly " -"able to run in an active/active configuration and gracefully tolerate the " -"disappearance of the APIs on which they depend." -msgstr "" -"伝統的に、Pacemaker は全方位的なソリューションとして位置づけられてきました。" -"しかしながら、OpenStack サービスが成熟するにつれて、徐々にアクティブ/アクティ" -"ブ設定にて動作でき、依存している API の消失に自然に耐えられます。" - -msgid "True" -msgstr "True (真)" - -msgid "" -"Typically, an active/active installation for a stateless service maintains a " -"redundant instance, and requests are load balanced using a virtual IP " -"address and a load balancer such as HAProxy." -msgstr "" -"一般的にステートレスサービスをアクティブ / アクティブにインストールすると、冗" -"長なインスタンスを維持することになります。リクエストは HAProxy のような仮想 " -"IP アドレスとロードバランサーを使用して負荷分散されます。" - -msgid "Use HA queues in RabbitMQ (``x-ha-policy: all``):" -msgstr "RabbitMQ における HA キューの使用 (``x-ha-policy: all``):" - -msgid "" -"Use MySQL/Galera in active/passive mode to avoid deadlocks on ``SELECT ... " -"FOR UPDATE`` type queries (used, for example, by nova and neutron). This " -"issue is discussed in the following:" -msgstr "" -"MySQL/Galera をアクティブ/パッシブモードで使用して、 ``SELECT ... 
FOR " -"UPDATE`` のような形式のクエリーにおけるデッドロックを避けます (例えば、nova " -"や neutron により使用されます)。この問題は、以下で議論されています。" - -msgid "Use durable queues in RabbitMQ:" -msgstr "RabbitMQ での永続キューの使用:" - -msgid "" -"Use that password to authenticate to the nodes that will make up the cluster:" -msgstr "このパスワードを使用して、クラスターを構成するノードに認証します。" - -msgid "" -"Use the :command:`corosync-cfgtool` utility with the ``-s`` option to get a " -"summary of the health of the communication rings:" -msgstr "" -":command:`corosync-cfgtool` ユーティリティーに ``-s`` オプションを付けて実行" -"して、コミュニケーションリングの稼働状態の概要を取得します。" - -msgid "" -"Use the :command:`corosync-objctl` utility to dump the Corosync cluster " -"member list:" -msgstr "" -":command:`corosync-objctl` ユーティリティーを使用して、Corosync クラスターの" -"メンバー一覧を出力します。" - -msgid "Use these steps to configurate all services using RabbitMQ:" -msgstr "" -"これらの手順を使用して、RabbitMQ を使用するすべてのサービスを設定します。" - -msgid "Value" -msgstr "値" - -msgid "Verify that the nodes are running:" -msgstr "そのノードが動作していることを検証します。" - -msgid "Verify the cluster status:" -msgstr "クラスターの状態を確認します。" - -msgid "Virtualized hardware" -msgstr "仮想ハードウェア" - -msgid "" -"We do not recommend setting the quorum to a value less than ``floor(n/2) + " -"1`` as it would likely cause a split-brain in a face of network partitions." -msgstr "" -"クォーラムの値を ``floor(n/2) + 1`` より小さく設定することは推奨しません。こ" -"れはネットワーク分割の発生時にスプリットブレインを引き起こす可能性がありま" -"す。" - -msgid "" -"We recommend HAProxy as the load balancer, however, there are many " -"alternative load balancing solutions in the marketplace." -msgstr "" -"ロードバランサーとして HAProxy を推奨しますが、マーケットプレースにさまざまな" -"同等品があります。" - -msgid "" -"We recommend two primary architectures for making OpenStack highly available." -msgstr "" -"OpenStack の高可用性のために基本的な 2 つのアーキテクチャーを推奨します。" - -msgid "" -"We recommended that the maximum latency between any two controller nodes is " -"2 milliseconds. Although the cluster software can be tuned to operate at " -"higher latencies, some vendors insist on this value before agreeing to " -"support the installation." -msgstr "" -"すべての 2つのコントローラーノード間の最大レイテンシーが 2 ミリ秒であることを" -"推奨します。クラスターソフトウェアがより大きなレイテンシーで動作するよう" -"チューニングできますが、いくつかのベンダーはサポートする前にこの値を主張しま" -"す。" - -msgid "What is a cluster manager?" -msgstr "クラスターマネージャーとは?" - -msgid "" -"When Ceph RBD is used for ephemeral volumes as well as block and image " -"storage, it supports `live migration `_ of VMs with ephemeral drives. LVM only " -"supports live migration of volume-backed VMs." -msgstr "" -"Ceph RBD をブロックストレージやイメージストレージと同じように一時ストレージ用" -"に使用する場合、一時ボリュームを持つ仮想マシンの `ライブマイグレーション " -"` " -"がサポートされます。LVM のみがボリュームをバックエンドとした仮想マシンのライ" -"ブマイグレーションをサポートします。" - -msgid "" -"When configuring an OpenStack environment for study or demonstration " -"purposes, it is possible to turn off the quorum checking. Production systems " -"should always run with quorum enabled." -msgstr "" -"学習やデモの目的に OpenStack 環境を設定している場合、クォーラムのチェックを無" -"効化できます。本番システムは必ずクォーラムを有効化して実行すべきです。" - -msgid "" -"When each cluster node starts, it checks the IP addresses given to the " -"``wsrep_cluster_address`` parameter. It then attempts to establish network " -"connectivity with a database server running there. Once it establishes a " -"connection, it attempts to join the Primary Component, requesting a state " -"transfer as needed to bring itself into sync with the cluster." 
-msgstr "" -"各クラスターノードが起動したとき、``wsrep_cluster_address`` パラメーターに指" -"定された IP アドレスを確認して、それで動作しているデータベースサーバーへの" -"ネットワーク接続性を確立しようとします。接続が確立されると、クラスターを同期" -"するために必要となる状態転送を要求する、Primary Component に参加しようとしま" -"す。" - -msgid "" -"When four nodes fail simultaneously, the cluster would continue to function " -"as well. But if split to partitions of three and four nodes respectively, " -"the quorum of three would have made both sides to attempt to fence the other " -"and host resources. Without fencing enabled, it would go straight to running " -"two copies of each resource." -msgstr "" -"4 ノードが同時に停止するとき、クラスターは十分に動作し続けるでしょう。しか" -"し、ノードがそれぞれ 3 つと 4 つに分断された場合、3 つのクォーラムが両方で他" -"のノードとホストリソースをフェンスしようとするでしょう。フェンスを有効化して" -"いないと、各リソースの 2 つのコピーが動作し続けるでしょう。" - -msgid "" -"When installing highly available OpenStack on VMs, be sure that your " -"hypervisor permits promiscuous mode and disables MAC address filtering on " -"the external network." -msgstr "" -"仮想マシン上に高可用性 OpenStack をインストールする場合、ハイパーバイザーが外" -"部ネットワークにおいてプロミスキャスモードを許可して、MAC アドレスフィルタリ" -"ングを無効化していることを確認してください。" - -msgid "" -"When you finish installing and configuring the OpenStack database, you can " -"initialize the Galera Cluster." -msgstr "" -"OpenStack のデータベースをインストールして設定するとき、Galera Cluster を初期" -"化できます。" - -msgid "" -"When you have all cluster nodes started, log into the database client of any " -"cluster node and check the ``wsrep_cluster_size`` status variable again:" -msgstr "" -"クラスターノードをどれか起動したとき、どれか 1 つにデータベースクライアントか" -"らログインして、``wsrep_cluster_size`` 状態変数を再び確認します。" - -msgid "" -"When you start up a cluster (all nodes down) and set ``wait_for_all`` to 1, " -"the cluster quorum is held until all nodes are online and have joined the " -"cluster for the first time. This parameter is new in Corosync 2.0." -msgstr "" -"クラスター (全ノードダウン) を起動して、 ``wait_for_all`` を 1 に設定すると" -"き、クラスターのクォーラムはすべてのノードがオンラインになり、まずクラスター" -"に参加するまで保持されることを意味します。このパラメーターは Corosync 2.0 の" -"新機能です。" - -msgid "" -"When you use high availability, consider the hardware requirements needed " -"for your application." -msgstr "" -"高可用性にするとき、アプリケーションに必要となるハードウェア要件を考慮してく" -"ださい。" - -msgid "" -"While SYS-V init replacements like systemd can provide deterministic " -"recovery of a complex stack of services, the recovery is limited to one " -"machine and lacks the context of what is happening on other machines. This " -"context is crucial to determine the difference between a local failure, and " -"clean startup and recovery after a total site failure." -msgstr "" -"systemd のような SYS-V init の代替は、複雑なスタックのサービスにおける順序を" -"守った復旧を提供できますが、復旧は 1 台のマシンに限定され、他のマシンにおいて" -"起きたことを把握できません。このコンテキストは、ローカル障害間の違いを判断" -"し、全サイト障害から正常に起動して復旧するために重要です。" - -msgid "" -"While all of the configuration parameters available to the standard MySQL, " -"MariaDB, or Percona XtraDB database servers are available in Galera Cluster, " -"there are some that you must define an outset to avoid conflict or " -"unexpected behavior." -msgstr "" -"標準的な MySQL、MariaDB、Percona XtraDB データベースに利用できる設定パラメー" -"ターは Galera Cluster で利用できますが、競合や予期しない動作を避けるために始" -"めに定義する必要があるものがあります。" - -msgid "" -"While the application can still run after the failure of several instances, " -"it may not have sufficient capacity to serve the required volume of " -"requests. A cluster can automatically recover failed instances to prevent " -"additional load induced failures." 
-msgstr "" -"アプリケーションは、いくつかのインスタンスが故障した後も動作できますが、要求" -"されたリクエスト量を処理するための十分な容量がないかもしれません。クラスター" -"は自動的に故障したインスタンスを復旧して、さらなる負荷が障害を引き起こさない" -"ようにできます。" - -msgid "" -"With ``secauth`` enabled, Corosync nodes mutually authenticates using a 128-" -"byte shared secret stored in the :file:`/etc/corosync/authkey` file. This " -"can be generated with the :command:`corosync-keygen` utility. Cluster " -"communications are encrypted when using ``secauth``." -msgstr "" -"``secauth`` を有効化すると、Corosync ノードが :file:`/etc/corosync/authkey` " -"に保存された 128 バイトの共有シークレットを使用して相互に認証されます。これ" -"は、 :command:`corosync-keygen` ユーティリティーを使用して生成できます。 " -"``secauth`` を使用するとき、クラスター通信は暗号化されます。" - -msgid "" -"With this in mind, some vendors are restricting Pacemaker's use to services " -"that must operate in an active/passive mode (such as ``cinder-volume``), " -"those with multiple states (for example, Galera), and those with complex " -"bootstrapping procedures (such as RabbitMQ)." -msgstr "" -"この点を考慮して、いくつかのベンダーは、``cinder-volume`` などのアクティブ/" -"パッシブモードで動作させる必要があるサービス、Galera などの複数の状態を持つ" -"サービス、RabbitMQ のように複雑なブートストラップ手順を持つサービスに " -"Pacemaker を使用することを制限しています。" - -msgid "" -"Within the ``nodelist`` directive, it is possible to specify specific " -"information about the nodes in the cluster. The directive can contain only " -"the node sub-directive, which specifies every node that should be a member " -"of the membership, and where non-default options are needed. Every node must " -"have at least the ``ring0_addr`` field filled." -msgstr "" -"``nodelist`` ディレクティブに、クラスター内のノードに関する具体的な情報を指定" -"できます。このディレクティブは、node サブディレクティブのみを含められます。こ" -"れは、メンバーシップのすべてのメンバーを指定し、デフォルト以外に必要となるオ" -"プションを指定します。すべてのノードは、少なくとも ``ring0_addr`` の項目を入" -"力する必要があります。" - -msgid "" -"Work is in progress on a unified approach, which combines the best aspects " -"of existing upstream solutions. More details are available on `the HA VMs " -"user story wiki `_." -msgstr "" -"検討は統一された方法により進行中です。既存のアップストリームのソリューション" -"における利点を組み合わせます。詳細は `HA VMs user story wiki `_ にあります。" - -msgid "" -"You can achieve high availability for the OpenStack database in many " -"different ways, depending on the type of database that you want to use. " -"There are three implementations of Galera Cluster available to you:" -msgstr "" -"使用したいデータベースの種類に応じて、さまざまな情報で OpenStack のデータベー" -"スの高可用性を実現できます。Galera Cluster は 3 種類の実装があります。" - -msgid "" -"You can also ensure the availability by other means, using Keepalived or " -"Pacemaker." -msgstr "" -"他の手段として、Pacemaker や Keepalived を使用して可能性を確保することもでき" -"ます。" - -msgid "" -"You can have up to 16 cluster members (this is currently limited by the " -"ability of corosync to scale higher). In extreme cases, 32 and even up to 64 " -"nodes could be possible. However, this is not well tested." -msgstr "" -"クラスターのメンバーを 16 まで持てます (これは、corosync をよりスケールさせる" -"機能による、現在の制限です)。極端な場合、32 や 64 までのノードさえ利用できま" -"すが、十分にテストされていません。" - -msgid "" -"You can now add the Pacemaker configuration for Block Storage API resource. " -"Connect to the Pacemaker cluster with the :command:`crm configure` command " -"and add the following cluster resources:" -msgstr "" -"Block Storage API リソース用の Pacemaker 設定を追加できます。 :command:`crm " -"configure` を用いて Pacemaker クラスターに接続し、以下のクラスターリソースを" -"追加します。" - -msgid "" -"You can now check the ``corosync`` connectivity with one of these tools." -msgstr "corosyncの接続性をそれらのツールの一つで確認することができます。" - -msgid "" -"You can read more about these concerns on the `Red Hat Bugzilla `_ and there is a `psuedo " -"roadmap `_ " -"for addressing them upstream." 
-msgstr "" -"これらの課題の詳細は `Red Hat Bugzilla `_ にあります。また、アップストリームにおいて解決するための " -"`psuedo roadmap `_ があります。" - -msgid "" -"You can take periodic snap shots throughout the installation process and " -"roll back to a working configuration in the event of a problem." -msgstr "" -"インストール中に定期的にスナップショットを取得したり、問題発生時に動作する設" -"定にロールバックしたりできます。" - -msgid "You can use the `ping` command to find the latency between two servers." -msgstr "`ping` コマンドを使用して、サーバー間のレイテンシーを調べられます。" - -msgid "" -"You must also create the OpenStack Image API endpoint with this IP address. " -"If you are using both private and public IP addresses, create two virtual IP " -"addresses and define your endpoint. For example:" -msgstr "" -"この IP アドレスを用いて OpenStack Image API エンドポイントを作成する必要があ" -"ります。プライベート IP アドレスとパブリック IP アドレスを両方使用している場" -"合、2 つの仮想 IP アドレスを作成して、次のようにエンドポイントを定義します。" - -msgid "" -"You must configure a supported Tooz driver for the HA deployment of the " -"Telemetry services." -msgstr "" -"Telemetry サービスの高可用性デプロイのために、サポートされる Tooz ドライバー" -"を設定する必要があります。" - -msgid "You must create the Shared File Systems API endpoint with this IP." -msgstr "" -"この IP を用いて Shared File Systems API エンドポイントを作成する必要がありま" -"す。" - -msgid "" -"You must select and assign a virtual IP address (VIP) that can freely float " -"between cluster nodes." -msgstr "" -"クラスターノード間で自由に移動できる仮想 IP アドレス (VIP) を選択して割り当て" -"る必要があります。" - -msgid "" -"You must use the same name on every cluster node. The connection fails when " -"this value does not match." -msgstr "" -"すべてのクラスターノードにおいて同じ名前を使用する必要があります。この値が一" -"致しない場合、接続が失敗します。" - -msgid "" -"You only need to do this on one cluster node. Galera Cluster replicates the " -"user to all the others." -msgstr "" -"どれか 1 つのクラスターノードにおいてのみ実行する必要があります。Galera " -"Cluster が、他のすべてのノードにユーザーを複製します。" - -msgid "" -"You should see a ``status=joined`` entry for each of your constituent " -"cluster nodes." -msgstr "" -"構成している各クラスターノードが ``status=joined`` になっているはずです。" - -msgid "" -"You will need to address high availability concerns for any applications " -"software that you run on your OpenStack environment. The important thing is " -"to make sure that your services are redundant and available. How you achieve " -"that is up to you." -msgstr "" -"お使いの OpenStack 環境で動作するアプリケーションソフトウェアすべてに対する高" -"可用性の課題を解決する必要があります。重要なことは、お使いのサービスが冗長で" -"あり利用できることを確実にすることです。どのように実現するのかは、あなた自身" -"によります。" - -msgid "" -"You would choose this option if you prefer to have fewer but more powerful " -"boxes." -msgstr "より少数の高性能なマシンを好む場合、この選択肢を選択するでしょう。" - -msgid "" -"You would choose this option if you prefer to have more but less powerful " -"boxes." -msgstr "より多数の低性能なマシンを好む場合、この選択肢を選択するでしょう。" - -msgid "" -"Your OpenStack services must now point their Block Storage API configuration " -"to the highly available, virtual cluster IP address rather than a Block " -"Storage API server’s physical IP address as you would for a non-HA " -"environment." -msgstr "" -"OpenStack サービスは、非 HA 環境と同じように Block Storage API サーバーの物" -"理 IP アドレスを指定する代わりに、Block Storage API の設定が高可用性と仮想ク" -"ラスター IP アドレスを指し示す必要があります。" - -msgid "" -"Your OpenStack services must now point their OpenStack Image API " -"configuration to the highly available, virtual cluster IP address instead of " -"pointing to the physical IP address of an OpenStack Image API server as you " -"would in a non-HA cluster." 
-msgstr "" -"OpenStack サービスが、非 HA クラスターであるような OpenStack Image API サー" -"バーの物理 IP アドレスを指し示す代わりに、高可用性な仮想クラスター IP アドレ" -"スを指し示すように、それらの OpenStack Image API の設定を変更する必要がありま" -"す。" - -msgid "" -"Your OpenStack services must now point their Shared File Systems API " -"configuration to the highly available, virtual cluster IP address rather " -"than a Shared File Systems API server’s physical IP address as you would for " -"a non-HA environment." -msgstr "" -"OpenStack サービスは、通常の非高可用性環境のように、Shared File Systems API " -"サーバーの物理 IP アドレスを指定する代わりに、Shared File Systems API の設定" -"が高可用性と仮想クラスター IP アドレスを指し示す必要があります。" - -msgid "" -"Your OpenStack services now point their OpenStack Identity configuration to " -"the highly available virtual cluster IP address." -msgstr "" -"OpenStack サービスが OpenStack Identity サーバーの設定が高可用性と仮想クラス" -"ター IP アドレスを指し示します。" - -msgid "[TODO: need more discussion of these parameters]" -msgstr "[TODO: need more discussion of these parameters]" - -msgid "" -"`Ceph RBD `_ is an innately high availability storage " -"back end. It creates a storage cluster with multiple nodes that communicate " -"with each other to replicate and redistribute data dynamically. A Ceph RBD " -"storage cluster provides a single shared set of storage nodes that can " -"handle all classes of persistent and ephemeral data (glance, cinder, and " -"nova) that are required for OpenStack instances." -msgstr "" -"`Ceph RBD `_ は、本質的に高可用性なストレージバックエンド" -"です。複数のノードを用いてストレージクラスターを作成し、お互いに通信して動的" -"にレプリケーションとデータ再配布を実行します。Ceph RBD ストレージクラスター" -"は、OpenStack インスタンスに必要となる、すべての種類の永続データと一時データ " -"(glance、cinder、nova) を取り扱える、単一の共有ストレージノードを提供します。" - -msgid "`Clustering Guide `_" -msgstr "`Clustering Guide `_" - -msgid "`Debian and Ubuntu `_" -msgstr "`Debian および Ubuntu `_" - -msgid "" -"`Galera Cluster for MySQL `_: The MySQL reference " -"implementation from Codership, Oy." -msgstr "" -"`Galera Cluster for MySQL `_: The MySQL reference " -"implementation from Codership, Oy." - -msgid "`Highly Available Queues `_" -msgstr "`Highly Available Queues `_" - -msgid "" -"`IMPORTANT: MySQL Galera does *not* support SELECT ... FOR UPDATE `_" -msgstr "" -"`IMPORTANT: MySQL Galera does *not* support SELECT ... FOR UPDATE `_" - -msgid "" -"`MariaDB Galera Cluster `_: The MariaDB implementation " -"of Galera Cluster, which is commonly supported in environments based on Red " -"Hat distributions." -msgstr "" -"`MariaDB Galera Cluster `_: The MariaDB implementation " -"of Galera Cluster, which is commonly supported in environments based on Red " -"Hat distributions." - -msgid "" -"`Pacemaker `_ cluster stack is a state-of-the-art " -"high availability and load balancing stack for the Linux platform. Pacemaker " -"is used to make OpenStack infrastructure highly available." -msgstr "" -"`Pacemaker `_ クラスタースタックは、Linux プラット" -"フォーム向けの最高水準の高可用性と負荷分散を実現します。Pacemaker は " -"OpenStack インフラを高可用化するために役立ちます。" - -msgid "" -"`Percona XtraDB Cluster `_: The XtraDB " -"implementation of Galera Cluster from Percona." -msgstr "" -"`Percona XtraDB Cluster `_: The XtraDB " -"implementation of Galera Cluster from Percona." 
- -msgid "" -"`RPM based `_ (RHEL, Fedora, " -"CentOS, openSUSE)" -msgstr "" -"`RPM 系 `_ (RHEL, Fedora, CentOS, " -"openSUSE)" - -msgid "" -"`Understanding reservations, concurrency, and locking in Nova `_" -msgstr "" -"`Understanding reservations, concurrency, and locking in Nova `_" - -msgid "``crmsh``" -msgstr "``crmsh``" - -msgid "" -"``last_man_standing_window`` specifies the time, in milliseconds, required " -"to recalculate quorum after one or more hosts have been lost from the " -"cluster. To perform a new quorum recalculation, the cluster must have quorum " -"for at least the interval specified for ``last_man_standing_window``. The " -"default is 10000ms." -msgstr "" -"``last_man_standing_window`` は、1 つ以上のホストがクラスターから失われた後、" -"クォーラムを再計算するために必要となる時間をミリ秒単位で指定します。新しく" -"クォーラムを再計算するために、クラスターは少なくとも " -"``last_man_standing_window`` に指定された間隔はクォーラムを保持する必要があり" -"ます。デフォルトは 10000ms です。" - -msgid "" -"``nodeid`` is optional when using IPv4 and required when using IPv6. This is " -"a 32-bit value specifying the node identifier delivered to the cluster " -"membership service. If this is not specified with IPv4, the node ID is " -"determined from the 32-bit IP address of the system to which the system is " -"bound with ring identifier of 0. The node identifier value of zero is " -"reserved and should not be used." -msgstr "" -"``nodeid`` は、IPv4 を使用するときにオプション、IPv6 を使用するときに必須で" -"す。クラスターメンバーシップサービスに配信される、ノード識別子を指定する 32 " -"ビットの値です。IPv4 で指定されていない場合、ノード ID は、システムがリング識" -"別子 0 に割り当てた 32 ビットの IP アドレスになります。ノード識別子の値 0 " -"は、予約済みであり、使用してはいけません。" - -msgid "``pcs``" -msgstr "``pcs``" - -msgid "" -"``ring{X}_addr`` specifies the IP address of one of the nodes. ``{X}`` is " -"the ring number." -msgstr "" -"``ring{X}_addr`` は、1 つのノードの IP アドレスを指定します。 ``{X}`` はリン" -"グの番号です。" - -msgid "" -"`a mistral-based auto-recovery workflow `_, by Intel" -msgstr "" -"`a mistral-based auto-recovery workflow `_, by Intel" - -msgid "`corosync`" -msgstr "`corosync`" - -msgid "`fence-agents` (CentOS or RHEL) or cluster-glue" -msgstr "`fence-agents` (CentOS、RHEL) または cluster-glue" - -msgid "`libqb0`" -msgstr "`libqb0`" - -msgid "`masakari `_, by NTT" -msgstr "`masakari `_, by NTT" - -msgid "`pacemaker`" -msgstr "`pacemaker`" - -msgid "`pcs` (CentOS or RHEL) or crmsh" -msgstr "`pcs` (CentOS、RHEL) または crmsh" - -msgid "`resource-agents`" -msgstr "`resource-agents`" - -msgid "allow_automatic_l3agent_failover" -msgstr "allow_automatic_l3agent_failover" - -msgid "compute node" -msgstr "コンピュートノード" - -msgid "controller node" -msgstr "コントローラーノード" - -msgid "l3_ha" -msgstr "l3_ha" - -msgid "max_l3_agents_per_router" -msgstr "max_l3_agents_per_router" - -msgid "min_l3_agents_per_router" -msgstr "min_l3_agents_per_router" diff --git a/doc/ha-guide/source/locale/tr_TR/LC_MESSAGES/ha-guide.po b/doc/ha-guide/source/locale/tr_TR/LC_MESSAGES/ha-guide.po deleted file mode 100644 index 213c8bea6b..0000000000 --- a/doc/ha-guide/source/locale/tr_TR/LC_MESSAGES/ha-guide.po +++ /dev/null @@ -1,3971 +0,0 @@ -# işbaran akçayır , 2017. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: openstackhaguide\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2018-08-22 22:08+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2017-07-27 09:49+0000\n" -"Last-Translator: Copied by Zanata \n" -"Language-Team: Turkish (Turkey)\n" -"Language: tr_TR\n" -"Plural-Forms: nplurals=2; plural=(n > 1);\n" -"X-Generator: Zanata 4.3.3\n" -"X-POOTLE-MTIME: 1499419470.000000\n" - -msgid "**Cluster Address**: List the IP addresses for each cluster node." -msgstr "**Küme Adresi**: Her küme düğümü için IP adreslerini listeleyin." - -msgid "**Cluster Name**: Define an arbitrary name for your cluster." -msgstr "**Küme İsmi**: Kümeniz için isteğe göre bir isim tanımlayın." - -msgid "" -"**Corosync configuration file fragment for unicast (``corosync.conf``)**" -msgstr "" -"**Corosync yapılandırma dosyası tekil yayın içi parçalanır (``corosync." -"conf``)**" - -msgid "" -"**Example Corosync configuration file for multicast (``corosync.conf``)**" -msgstr "" -"**Çoklu yayın için örnek Corosync yapılandırma dosyası (``corosync.conf``)**" - -msgid "**Node Address**: Define the IP address of the cluster node." -msgstr "**Düğüm Adresi**: Küme düğümünün IP adresini tanımlayın." - -msgid "**Node Name**: Define the logical name of the cluster node." -msgstr "**Düğüm İsmi**: Küme düğümünün mantıksal ismini tanımlayın." - -msgid "" -"**wsrep Provider**: The Galera Replication Plugin serves as the ``wsrep`` " -"provider for Galera Cluster. It is installed on your system as the " -"``libgalera_smm.so`` file. Define the path to this file in your ``my.cnf``:" -msgstr "" -"**wsrep Sağlayıcı**: Galera Çoğaltma Eklentisi Galera Kümesi için ``wsrep`` " -"sağlayıcı olarak çalışır. Sisteminizde ``libgalera_smm.so`` dosyası olarak " -"yüklüdür. 
``my.cnf`` dosyanızda bu dosyanın yolunu tanımlayın:" - -msgid "/etc/neutron/neutron.conf parameters for high availability" -msgstr "Yüksek kullanılırlık için /etc/neutron/neutron.conf parametreleri" - -msgid "12 GB" -msgstr "12 GB" - -msgid "12+ GB" -msgstr "12+ GB" - -msgid "120 GB" -msgstr "120 GB" - -msgid "120+ GB" -msgstr "120+ GB" - -msgid "2" -msgstr "2" - -msgid "2 or more" -msgstr "2 veya fazla" - -msgid "4" -msgstr "4" - -msgid "8+" -msgstr "8+" - -msgid ":doc:`Networking DHCP agent`" -msgstr ":doc:`Ağ DHCP aracısı`" - -msgid ":doc:`Neutron L3 agent`" -msgstr ":doc:`Neutron L3 aracısı`" - -msgid "" -":ref:`Configure OpenStack services to use RabbitMQ HA queues `" -msgstr "" -":ref:`OpenStack servislerini RabbitMQ HA kuyrukları kullanacak şekilde " -"yapılandır `" - -msgid ":ref:`Configure RabbitMQ for HA queues`" -msgstr ":ref:`HA kuyrukları için RabbitMQ yapılandırması`" - -msgid ":ref:`Install RabbitMQ`" -msgstr ":ref:`RabbitMQ kurulumu`" - -msgid ":ref:`corosync-multicast`" -msgstr ":ref:`corosync-multicast`" - -msgid ":ref:`corosync-unicast`" -msgstr ":ref:`corosync-unicast`" - -msgid ":ref:`corosync-votequorum`" -msgstr ":ref:`corosync-votequorum`" - -msgid ":ref:`glance-api-configure`" -msgstr ":ref:`glance-api-configure`" - -msgid ":ref:`glance-api-pacemaker`" -msgstr ":ref:`glance-api-pacemaker`" - -msgid ":ref:`glance-services`" -msgstr ":ref:`glance-services`" - -msgid ":ref:`ha-blockstorage-configure`" -msgstr ":ref:`ha-blockstorage-configure`" - -msgid ":ref:`ha-blockstorage-pacemaker`" -msgstr ":ref:`ha-blockstorage-pacemaker`" - -msgid ":ref:`ha-blockstorage-services`" -msgstr ":ref:`ha-blockstorage-services`" - -msgid ":ref:`ha-sharedfilesystems-configure`" -msgstr ":ref:`ha-sharedfilesystems-configure`" - -msgid ":ref:`ha-sharedfilesystems-pacemaker`" -msgstr ":ref:`ha-sharedfilesystems-pacemaker`" - -msgid ":ref:`ha-sharedfilesystems-services`" -msgstr ":ref:`ha-sharedfilesystems-services`" - -msgid ":ref:`identity-config-identity`" -msgstr ":ref:`identity-config-identity`" - -msgid ":ref:`identity-pacemaker`" -msgstr ":ref:`identity-pacemaker`" - -msgid ":ref:`identity-services-config`" -msgstr ":ref:`identity-services-config`" - -msgid ":ref:`pacemaker-cluster-properties`" -msgstr ":ref:`pacemaker-cluster-properties`" - -msgid ":ref:`pacemaker-corosync-setup`" -msgstr ":ref:`pacemaker-corosync-setup`" - -msgid ":ref:`pacemaker-corosync-start`" -msgstr ":ref:`pacemaker-corosync-start`" - -msgid ":ref:`pacemaker-install`" -msgstr ":ref:`pacemaker-install`" - -msgid ":ref:`pacemaker-start`" -msgstr ":ref:`pacemaker-start`" - -msgid "" -":term:`Advanced Message Queuing Protocol (AMQP)` provides OpenStack internal " -"stateful communication service." -msgstr "" -":term:`Gelişmiş İleti Kuyruklama İletişim Kuralı (AMQP)` OpenStack'e dahili " -"durumsal iletişim servisi sağlar." - -msgid ":term:`active/active configuration`" -msgstr ":term:`etkin/etkin yapılandırma`" - -msgid ":term:`active/passive configuration`" -msgstr ":term:`etkin/pasif yapılandırma`" - -msgid "" -"A crucial aspect of high availability is the elimination of single points of " -"failure (SPOFs). A SPOF is an individual piece of equipment or software that " -"causes system downtime or data loss if it fails. In order to eliminate " -"SPOFs, check that mechanisms exist for redundancy of:" -msgstr "" -"Yüksek kullanılırlığın önemli bir yönü tek bir arıza noktasını elemesidir " -"(SPOF'lar). SPOF arızalandığında sistemin kapalı kalmasına ya da veri " -"kaybına yol açan ekipman parçasıdır. 
SPOF'ları elemek için, aşağıdakiler " -"için yedekleme yöntemleriniz olduğundan emin olun:" - -msgid "A minimum of three hosts" -msgstr "En az üç sunucu" - -msgid "" -"A sample votequorum service configuration in the :file:`corosync.conf` file " -"is:" -msgstr "" -":file:`corosync.conf` dosyasında örnek bir votequorum servis yapılandırması " -"şöyledir:" - -msgid "" -"A service that provides a response after your request and then requires no " -"further attention. To make a stateless service highly available, you need to " -"provide redundant instances and load balance them. OpenStack services that " -"are stateless include ``nova-api``, ``nova-conductor``, ``glance-api``, " -"``keystone-api``, ``neutron-api``, and ``nova-scheduler``." -msgstr "" -"İsteğinizden sonra bir yanıt sağlayan ve ardından başka ilgi beklemeyen bir " -"servis. Durumsuz bir servisi yüksek kullanılabilir yapmak için, yedekli " -"sunucular kullanmalı ve yük dengelemesi kullanmalısınız. Durumsuz OpenStack " -"servisleri ``nova-api``, ``nova-conductor``, ``glance-api``, ``keystone-" -"api``, ``neutron-api``, ve ``nova-scheduler``i içerir." - -msgid "" -"A service where subsequent requests to the service depend on the results of " -"the first request. Stateful services are more difficult to manage because a " -"single action typically involves more than one request. Providing additional " -"instances and load balancing does not solve the problem. For example, if the " -"horizon user interface reset itself every time you went to a new page, it " -"would not be very useful. OpenStack services that are stateful include the " -"OpenStack database and message queue. Making stateful services highly " -"available can depend on whether you choose an active/passive or active/" -"active configuration." -msgstr "" -"Servise yapılan sıralı isteklerin ilk isteğin sonucuna bağlı olması. " -"Durumsal servisleri yönetmesi daha zordur çünkü tek bir eylem genellikle bir " -"istekten fazlasını içerir. Ek sunucular ve yük dengeleme eklemek sorunu " -"çözmez. Örneğin horizon kullanıcı arayüzü her yeni sayfa istediğinizde " -"kendisini sıfırlasa pek faydalı olmaz. OpenStack servislerini yüksek " -"kullanılır yapmak etkin/pasif veya etkin/etkin yapılandırma seçmenize bağlı " -"olarak değişebilir." - -msgid "" -"A shared implementation and calculation of `quorum `_" -msgstr "" -"`Yetersayının `_ " -"paylaşımlı uygulanması ve hesaplanması" - -msgid "" -"A single application does not have sufficient context to know the difference " -"between failure of a machine and failure of the application on a machine. " -"The usual practice is to assume the machine is dead and continue working, " -"however this is highly risky. A rogue process or machine could still be " -"responding to requests and generally causing havoc. The safer approach is to " -"make use of remotely accessible power switches and/or network switches and " -"SAN controllers to fence (isolate) the machine before continuing." -msgstr "" -"Tek bir uygulama bir makinenin arızası ve makine üzerindeki uygulamanın " -"arızası arasındaki farkı bilmek için yeterli içeriğe sahip değildir. Yaygın " -"olarak makinenin ölü olduğu varsayılır, ancak bunun riski yüksektir. Yaramaz " -"bir süreç veya makine hala isteklere yanıt veriyor ve büyük hasara sebep " -"oluyor olabilir. Güvenli yaklaşım uzaktan erişilebilir güç anahtarlarını ve/" -"veya ağ anahtalarını ve SAN denetleyicilerini kullanarak makineyi devam " -"etmeden önce parmaklığa almaktır (yalıtım)." 
- -msgid "" -"A typical active/active installation for a stateful service includes " -"redundant services, with all instances having an identical state. In other " -"words, updates to one instance of a database update all other instances. " -"This way a request to one instance is the same as a request to any other. A " -"load balancer manages the traffic to these systems, ensuring that " -"operational systems always handle the request." -msgstr "" -"Durumsal bir servis için genel bir etkin/etkin kurulumu tüm sunucuların aynı " -"durumda olduğu yedekli servisler içerir. Başka bir deyişle, bir veritabanı " -"sunucusuna yapılan güncelleme diğer tüm sunucuları da günceller. Bu yolla " -"bir sunucuya yapılan istek herhangi başka birine yapılanla aynı olur. Bir " -"yük dengeleyici bu sistemlere trafiği yönetir, işlevsel sistemlerin her " -"zaman isteği ele aldığından emin olur." - -msgid "" -"A typical active/passive installation for a stateful service maintains a " -"replacement resource that can be brought online when required. Requests are " -"handled using a :term:`virtual IP address (VIP)` that facilitates returning " -"to service with minimal reconfiguration. A separate application (such as " -"Pacemaker or Corosync) monitors these services, bringing the backup online " -"as necessary." -msgstr "" -"Durumsal bir servis için tipik bir etkin/pasif kurulum gerektiğinde " -"çevrimiçi yapılabilecek yedek bir kaynak bakımı da yapar. İstekler servise " -"asgari yeniden yapılandırma ile dönen bir :term:`sanal IP adresi (VIP)` " -"tarafından ele alınır. Ayrı bir uygulama (Pacemaker veya Corosync gibi) bu " -"servisleri izler, gerektiğinde yedeği çevrimiçi yapar." - -msgid "API isolation" -msgstr "API yalıtımı" - -msgid "Abstract" -msgstr "Özet" - -msgid "" -"Access to Memcached is not handled by HAProxy because replicated access is " -"currently in an experimental state. Instead, OpenStack services must be " -"supplied with the full list of hosts running Memcached." -msgstr "" -"Memcached erişimi HAProxy tarafından ele alınmaz çünkü yedekli erişim şu an " -"deneysel bir durumda. Bunun yerine, OpenStack servislerineMemcached " -"çalıştıran tüm sunucuların listesi verilmelidir." - -msgid "Active/passive versus active/active" -msgstr "Etkin/pasif karşısında etkin/etkin" - -msgid "Add Block Storage API resource to Pacemaker" -msgstr "Pacemaker'e Blok Depolama API kaynağı ekleyin" - -msgid "" -"Add HAProxy to the cluster and ensure the VIPs can only run on machines " -"where HAProxy is active:" -msgstr "" -"HAProxy'i kümeye ekleyin ve VIP'lerin yalnızca HAProxy'nin etkin olduğu " -"makinelerde çalıştığından emin olun:" - -msgid "Add OpenStack Identity resource to Pacemaker" -msgstr "OpenStack Kimlik kaynağını Pacemaker'e ekleyin" - -msgid "Add OpenStack Image API resource to Pacemaker" -msgstr "OpenStack İmaj API kaynağını Pacemaker'e ekleyin" - -msgid "Add Shared File Systems API resource to Pacemaker" -msgstr "Paylaşımlı Dosya Sistemleri API kaynağının Pacemaker'e eklenmesi" - -msgid "" -"Add the Pacemaker configuration for the OpenStack Identity resource by " -"running the following command to connect to the Pacemaker cluster:" -msgstr "" -"OpenStack Kimlik kaynağı için Pacemaker yapılandırmasını Pacemaker kümesine " -"bağlanmak için aşağıdaki komutları çalıştırarak ekleyin:" - -msgid "" -"Add the Pacemaker configuration for the OpenStack Image API resource. 
Use " -"the following command to connect to the Pacemaker cluster:" -msgstr "" -"OpenStack İmaj API kaynağı için Pacemaker yapılandırmasını ekleyin. " -"Pacemaker kümesine bağlanmak için aşağıdaki komutu kullanın:" - -msgid "" -"Add the Pacemaker configuration for the Shared File Systems API resource. " -"Connect to the Pacemaker cluster with the following command:" -msgstr "" -"Paylaşımlı Dosya Sistemleri API kaynağı için Pacemaker yapılandırmasını " -"ekleyin. Aşağıdaki komut ile Pacemaker kümesine bağlanın:" - -msgid "Add the following cluster resources:" -msgstr "Şu küme kaynaklarını ekleyin:" - -msgid "Additional parameters" -msgstr "Ek parametreler" - -msgid "" -"After installing the Corosync package, you must create the :file:`/etc/" -"corosync/corosync.conf` configuration file." -msgstr "" -"Corosync paketini kurduktan sonra, :file:`/etc/corosync/corosync.conf` " -"yapılandırma dosyasını oluşturmalısınız." - -msgid "" -"After the ``corosync`` service have been started and you have verified that " -"the cluster is communicating properly, you can start :command:`pacemakerd`, " -"the Pacemaker master control process. Choose one from the following four " -"ways to start it:" -msgstr "" -"``corosync`` servisini başlatıp kümenin düzgün iletişim sürdürdüğünü " -"doğruladıktan sonra, Pacemaker ana kontrol süreci :command:`pacemakerd`yi " -"başlatabilirsiniz. Başlatmak için aşağıdaki dört yoldan birini seçin:" - -msgid "" -"After the ``pacemaker`` service has started, Pacemaker creates a default " -"empty cluster configuration with no resources. Use the :command:`crm_mon` " -"utility to observe the status of ``pacemaker``:" -msgstr "" -"``pacemaker`` servisi başladıktan sonra, Pacemaker kaynakları olmayan " -"öntanımlı boş bir küme yapılandırması oluşturur. ``pacemaker`` durumunu " -"gözetlemek için :command:`crm_mon` aracını kullanın:" - -msgid "After you make these changes, commit the updated configuration." -msgstr "Bu değişiklikleri yaptıktan sonra, güncel yapılandırmayı gönderin." - -msgid "" -"After you set up your Pacemaker cluster, set a few basic cluster properties:" -msgstr "" -"Pacemaker kümenizi ayarladıktan sonra, temel birkaç küme özelliğini " -"ayarlayın:" - -msgid "All routers are highly available by default." -msgstr "Tüm yönlendiriciler öntanımlı olarak yüksek kullanılırlığa sahip." - -msgid "" -"Almost all services in this stack benefit from being proxied. Using a proxy " -"server provides the following capabilities:" -msgstr "" -"Bu yığındaki rerdeyse tüm servisler vekillenmekten faydalanır. Vekil sunucu " -"kullanmak şu yetenekleri sağlar:" - -msgid "" -"Alternatively, if the database server is running, use the " -"``wsrep_last_committed`` status variable:" -msgstr "" -"Alternatif olarak, veritabanı sunucusu çalışıyorsa, ``wsrep_last_committed`` " -"durum değişkenini kullanın:" - -msgid "" -"Alternatively, instead of using systemd agents, download and install the OCF " -"resource agent:" -msgstr "" -"Alternatif olarak, systemd aracıları kullanmak yerine, OCF kaynak " -"aracılarını indirin ve kurun:" - -msgid "" -"Alternatively, make modifications using the ``firewall-cmd`` utility for " -"FirewallD that is available on many Linux distributions:" -msgstr "" -"Alternatif olarak, çoğu Linux dağıtımında kullanılabilir olan FirewallID " -"için ``firewall-cmd`` aracını kullanarak değişiklikler yapın:" - -msgid "" -"Alternatively, you can use a commercial load balancer, which is hardware or " -"software. 
We recommend a hardware load balancer as it generally has good " -"performance." -msgstr "" -"Alternatif olarak, donanımsal veya yazılımsal ticari bir yük dengeleyici " -"kullanabilirsiniz. Genellikle daha iyi başarımı olduğundan donanımsal bir " -"yük dengeleyici öneriyoruz." - -msgid "Alternatively:" -msgstr "Alternatif olarak:" - -msgid "" -"An AMQP (Advanced Message Queuing Protocol) compliant message bus is " -"required for most OpenStack components in order to coordinate the execution " -"of jobs entered into the system." -msgstr "" -"Sisteme girilen işlerin çalıştırılmasını düzenlemek için çoğu OpenStack " -"bileşenleri için bir AMQP (Gelişmiş İleti Kuyruklama İletişim Kuralı) uyumlu " -"ileti yolu gereklidir." - -msgid "An OpenStack environment includes multiple data pools for the VMs:" -msgstr "OpenStack ortamı sanal makineler için birçok veri havuzu içerir:" - -msgid "" -"And the quorum could also have been set to three, just as a configuration " -"example." -msgstr "" -"Ve yetersayı yalnızca örnek bir yapılandırma olarak üçe ayarlanmış olabilir." - -msgid "AppArmor" -msgstr "AppArmor" - -msgid "AppArmor now permits Galera Cluster to operate." -msgstr "AppArmor artık Galera Kümesinin çalışmasına izin veriyor." - -msgid "Appendix" -msgstr "Ek Bölüm" - -msgid "" -"Application Armor is a kernel module for improving security on Linux " -"operating systems. It is developed by Canonical and commonly used on Ubuntu-" -"based distributions. In the context of Galera Cluster, systems with AppArmor " -"may block the database service from operating normally." -msgstr "" -"Uygulama Zırhı Linux işletim sistemlerinde güvenliği artırmak için " -"kullanılan bir çekirdek modülüdür. Canonical tarafından geliştirilmiştir ve " -"yaygın olarak Ubuntu tabanlı dağıtımlarda kullanılır. Galera Kümesi " -"kapsamında AppArmor'a sahip sistemlerin veritabanı servisinin normal " -"işleyişini engelleyebileceğini söyleyebiliriz." - -msgid "Applications and automatic service migration" -msgstr "Uygulamalar ve otomatik servis göçü" - -msgid "" -"As another option to make RabbitMQ highly available, RabbitMQ contains the " -"OCF scripts for the Pacemaker cluster resource agents since version 3.5.7. " -"It provides the active/active RabbitMQ cluster with mirrored queues. For " -"more information, see `Auto-configuration of a cluster with a Pacemaker " -"`_." -msgstr "" -"RabbitMQ'yu yüksek kullanılır yapmak için başka bir seçenek de, RabbitMQ'nun " -"3.5.7 sürümünden itibaren Pacemaker küme kaynak aracıları için içerdiği OCF " -"betikleridir. Etkin/etkin RabbitMQ kümesine yansılı kuyruklar sağlar. Daha " -"fazla bilgi için `Pacemaker ile bir kümenin otomatik yapılandırılmasına " -"`_ göz atın." - -msgid "" -"As of September 2016, the OpenStack High Availability community is designing " -"and developing an official and unified way to provide high availability for " -"instances. We are developing automatic recovery from failures of hardware or " -"hypervisor-related software on the compute node, or other failures that " -"could prevent instances from functioning correctly, such as, issues with a " -"cinder volume I/O path." -msgstr "" -"Eylül 2016 itibariyle, OpenStack Yüksek Kullanılırlık topluluğu sunucular " -"için yüksek kullanılırlık sağlamak için resmi ve birleştirilmiş bir yol " -"tasarlıyor ve geliştiriyorlar. 
Hesaplama düğümünde donanımsal veya " -"hipervizörle ligili yazılımsal arızalardan, veya cinder birim I/O yoluyla " -"ilgili sunucuların düzgün işlemesine engel olan sorunlarda otomatik kurtarma " -"geliştiriyoruz." - -msgid "" -"At its core, a cluster is a distributed finite state machine capable of co-" -"ordinating the startup and recovery of inter-related services across a set " -"of machines." -msgstr "" -"Özünde, bir küme, dahili ilişkili servisleri bir makine kümesi arasında " -"başlatıp kurtarmayı düzenleme yetisine sahip dağıtık sonlu makinedir." - -msgid "Automated recovery of failed instances" -msgstr "Arızalı sunucuların otomatik kurtarılması" - -msgid "Awareness of instances on other machines" -msgstr "Diğer makinelerdeki sunuculardan haberdar olmak" - -msgid "Awareness of other applications in the stack" -msgstr "Yığındaki diğer uygulamaların farkında olmak" - -msgid "" -"Bear in mind, leaving SELinux in permissive mode is not a good security " -"practice. Over the longer term, you need to develop a security policy for " -"Galera Cluster and then switch SELinux back into enforcing mode." -msgstr "" -"SELinux'u izin veren kipte bırakmanın pek iyi bir güvenlik tercihi " -"olmadığını unutmayın. Uzun vadede, Galera Kümesi için bir güvenlik ilkesi " -"geliştirmeli ve SELinux'u tekrar etkin kipe geçirmelisiniz." - -msgid "" -"Before beginning, ensure you have read the `OpenStack Identity service " -"getting started documentation `_." -msgstr "" -"Başlamadan önce, `OpenStack Kimlik servisi başlama belgelendirmesini " -"`_ " -"okuduğunuzdan emin olun." - -msgid "" -"Before following this guide to configure the highly available OpenStack " -"cluster, ensure the IP ``10.0.0.11`` and hostname ``controller`` are not in " -"use." -msgstr "" -"Bu kılavuzu takip ederek yüksek kullanılır OpenStack kümesi yapılandırmadan " -"önce, ``10.0.0.11`` IP adresinin ve ``controller`` makine adının kullanımda " -"olmadığından emin olun." - -msgid "" -"Before you launch Galera Cluster, you need to configure the server and the " -"database to operate as part of the cluster." -msgstr "" -"Galera Kümesini başlatmadan önce sunucu ve veritabanını kümenin bir parçası " -"olarak çalışacak şekilde yapılandırmalısınız." - -msgid "" -"Both the central and the compute agent can run in an HA deployment. This " -"means that multiple instances of these services can run in parallel with " -"workload partitioning among these running instances." -msgstr "" -"Hem merkezi hem hesaplama aracısı HA bir kurulumda çalışabilirler. Bunun " -"anlamı bu servislerin çoklu şekilde paralel olarak iş yükünü çalışan " -"sunucular arasında dağıtarak çalışabileceğidir." - -msgid "" -"Both use a cluster manager, such as Pacemaker or Veritas, to orchestrate the " -"actions of the various services across a set of machines. Because we are " -"focused on FOSS, we refer to these as Pacemaker architectures." -msgstr "" -"Her biri makine kümeleri arasındaçi çeşitli eylemleri yönetmek için " -"Pacemaker veya Veritas gibi bir küme yönetici kullanır. FOSS üzerine " -"eğildiğimizden, bunlara Pacemaker mimarileri olarak başvuracağız." - -msgid "" -"By default, STONITH is enabled in Pacemaker, but STONITH mechanisms (to " -"shutdown a node via IPMI or ssh) are not configured. In this case Pacemaker " -"will refuse to start any resources. For production cluster it is recommended " -"to configure appropriate STONITH mechanisms. 
But for demo or testing " -"purposes STONITH can be disabled completely as follows:" -msgstr "" -"Öntanımlı olarak, STONITH Pacemaker'de etkindir, ama STONITH yöntemleri (bir " -"düğümü IPMI veya ssh ile kapatmak için) yapılandırılmamıştır. Bu durumda " -"Pacemaker herhangi bir kaynağı başlatmayı reddedecektir. Üretim kümesi için " -"uygun STONITH yöntemlerini yapılandırmanız önerilir. Tanıtım veya deneme " -"amaçlı olarak STONITH aşağıdaki gibi tamamen kapatılabilir:" - -msgid "" -"By default, ``controller1`` handles the caching service. If the host goes " -"down, ``controller2`` or ``controller3`` will complete the service." -msgstr "" -"Öntanımlı olarak, ``controller1`` önbellek servisini ele alır. Sunucu " -"kapanırsa, ``controller2`` veya ``controller3`` servisi tamamlayacaktır." - -msgid "" -"By default, cluster nodes do not start as part of a Primary Component. In " -"the Primary Component, replication and state transfers bring all databases " -"to the same state." -msgstr "" -"Öntanımlı olarak, küme düğümleri bir Birincil Bileşenin parçası olarak " -"başlamazlar. Birincil Bileşende, yedekleme ve durum aktarımları tüm " -"veritabanlarını aynı duruma getirir." - -msgid "" -"By sending all API access through the proxy, you can clearly identify " -"service interdependencies. You can also move them to locations other than " -"``localhost`` to increase capacity if the need arises." -msgstr "" -"Tüm API erişimini vekil aracılığıyla göndererek, servis dahili " -"bağımlılıklarını açıkça tanımlayabilirsiniz. Ayrıca ihtiyaç halinde " -"kapasiteyi artırmak için ``localhost`` dışında konumlara da taşıyabilirsiniz." - -msgid "Ceph" -msgstr "Ceph" - -msgid "" -"Ceph RBD provides object replication capabilities by storing Block Storage " -"volumes as Ceph RBD objects. Ceph RBD ensures that each replica of an object " -"is stored on a different node. This means that your volumes are protected " -"against hard drive and node failures, or even the failure of the data center " -"itself." -msgstr "" -"Ceph RBD Blok Depolama birimlerini Ceph RBD nesneleri olarak saklayarak " -"nesne yedekleme yeteneği sağlar. Ceph RBD bir nesnenin her bir yedeğinin " -"farklı bir düğümde saklandığından emin olur. Yani birimleriniz sabit disk ve " -"düğüm arızalarından korunur, hatta veri merkezinin arızalanmasından bile." - -msgid "" -"Certain services running on the underlying operating system of your " -"OpenStack database may block Galera Cluster from normal operation or prevent " -"``mysqld`` from achieving network connectivity with the cluster." -msgstr "" -"OpenStack veritabanınızın altta yatan işletim sisteminde çalışan belli " -"servisler Galera Kümesini normal işlemekten veya ``mysqld``yi küme ile ağ " -"iletişimine geçmekten alıkoyabilir." - -msgid "Change the number of expected votes for a cluster to be quorate" -msgstr "" -"Bir kümenin yeterli çoğunluğu almış olması için beklenen oy sayısını değiştir" - -msgid "Change the number of votes assigned to a node" -msgstr "Bir düğüme atanan oy sayısını değiştir" - -msgid "" -"Cinder provides Block-Storage-as-a-Service suitable for performance " -"sensitive scenarios such as databases, expandable file systems, or providing " -"a server with access to raw block level storage." -msgstr "" -"Cinder veritabanları, genişleyebilir dosya sistemleri veya bir sunucuya ham " -"blok seviyesinde depolama erişimi sağlamak gibi hassas başarım senaryoları " -"için Servis olarak blok depolama sağlar." 
- -msgid "Clusters and quorums" -msgstr "Kümeler ve yetersayılar" - -msgid "" -"Clusters with an even number of hosts suffer from similar issues. A single " -"network failure could easily cause a N:N split where neither side retains a " -"majority. For this reason, we recommend an odd number of cluster members " -"when scaling up." -msgstr "" -"Eşit sayıda sunucuya sahip kümeler benzer sorunlarla karşılaşır. Tek bir ağ " -"arızası kolaylıkla iki tarafın da çoğunluğu tutmadığı N:N ayrımına yol " -"açabilir. Bu sebeple ölçeklemede tek sayıda küme ögesi seçmenizi tavsiye " -"ediyoruz." - -msgid "Collapsed" -msgstr "Kapalı" - -msgid "" -"Commit your configuration changes by entering the following command from " -"the :command:`crm configure` menu:" -msgstr "" -":command:`crm configure` menüsünden aşağıdaki komutu girerek yapılandırma " -"değişikliklerinizi gönderin:" - -msgid "" -"Commit your configuration changes from the :command:`crm configure` menu " -"with the following command:" -msgstr "" -"Aşağıdaki komutla yapılandırma değişikliklerinizi :command:`crm configure` " -"menüsünden gönderin:" - -msgid "Common deployment architectures" -msgstr "Genel kurulum mimarileri" - -msgid "Configuration" -msgstr "Yapılandırma" - -msgid "Configuration tips" -msgstr "Yapılandırma ipuçları" - -msgid "Configure Block Storage API service" -msgstr "Blok Depolama API servisini yapılandırın" - -msgid "Configure NTP" -msgstr "NTP'yi yapılandır" - -msgid "Configure OpenStack Identity service" -msgstr "OpenStack Kimlik servisini yapılandırın" - -msgid "Configure OpenStack Image service API" -msgstr "OpenStack İmaj API servisini yapılandırın" - -msgid "Configure OpenStack services to use HA Shared File Systems API" -msgstr "" -"OpenStack servislerini HA Paylaşımlı Dosya Sistemleri API'sini kullanacak " -"şekilde yapılandırın" - -msgid "Configure OpenStack services to use Rabbit HA queues" -msgstr "" -"OpenStack servislerini Rabbit HA kuyrukları kullanacak şekilde yapılandırın:" - -msgid "" -"Configure OpenStack services to use the highly available Block Storage API" -msgstr "" -"OpenStack servislerini yüksek kullanılır Blok Depolama API'sini kullanacak " -"şekilde yapılandırın" - -msgid "" -"Configure OpenStack services to use the highly available OpenStack Identity" -msgstr "" -"OpenStack servislerini yüksek kullanılırlıklı OpenStack Kimliği kullanacak " -"şekilde yapılandırın" - -msgid "" -"Configure OpenStack services to use the highly available OpenStack Image API" -msgstr "" -"OpenStack servislerini yüksek kullanılır OpenStack İmaj API'sini kullanacak " -"şekilde yapılandırma" - -msgid "Configure RabbitMQ for HA queues" -msgstr "RabbitMQ'yu HA kuyrukları için yapılandır" - -msgid "Configure Shared File Systems API service" -msgstr "Paylaşımlı Dosya Sistemleri API servisini yapılandırın" - -msgid "Configure the OpenStack components to use at least two RabbitMQ nodes." -msgstr "" -"OpenStack bileşenlerini en az iki RabbitMQ düğümü kullanacak şekilde " -"yapılandırın." - -msgid "Configure the VIP" -msgstr "VIP Yapılandırma" - -msgid "" -"Configure the kernel parameter to allow non-local IP binding. This allows " -"running HAProxy instances to bind to a VIP for failover. Add following line " -"to ``/etc/sysctl.conf``:" -msgstr "" -"Yerel-olmayan IP bağlamaya izin vermek için çekirdek parametresini " -"yapılandırın. Bu HAProxy sunucuların bir VIP'e bağlanmasını ya da yedeğe " -"geçmesine izin verir. 
``/etc/sysctl.conf`` dosyasına şu satırı ekleyin:" - -msgid "Configuring Block Storage to listen on the VIP address" -msgstr "Blok Depolamayı VIP adresinde dinleyecek şekilde yapılandırmak" - -msgid "Configuring HAProxy" -msgstr "HAProxy Yapılandırması" - -msgid "Configuring InnoDB" -msgstr "InnoDB yapılandırması" - -msgid "Configuring OpenStack services to use this IP address" -msgstr "OpenStack serislerini bu IP adresini kullanacak şekilde yapılandırmak" - -msgid "" -"Configuring RAID on the hard drives that implement storage protects your " -"data against a hard drive failure. If the node itself fails, data may be " -"lost. In particular, all volumes stored on an LVM node can be lost." -msgstr "" -"Depolamayı sağlayan sabit diskler üzerinde RAID yapılandırmak verinizi sabit " -"disk arızalarından korur. Düğüm kendisi arızalanırsa veri kaybolabilir. " -"Özellikle LVM düğümünde depolanan tüm birimler kaybolabilir." - -msgid "Configuring high availability for instances" -msgstr "Sunucular için yüksek kullanılırlık yapılandırması" - -msgid "Configuring mysqld" -msgstr "Mysqld'yi yapılandırma" - -msgid "Configuring storage" -msgstr "Depolamanın yapılandırılması" - -msgid "Configuring the basic environment" -msgstr "Temel ortamın yapılandırılması" - -msgid "Configuring the compute node" -msgstr "Hesaplama düğümünü yapılandırın" - -msgid "Configuring the controller" -msgstr "Denetleyicini yapılandırılması" - -msgid "Configuring the networking services" -msgstr "Ağ servislerinin yapılandırılması" - -msgid "Configuring the server" -msgstr "Sunucunun yapılandırılması" - -msgid "Configuring the shared services" -msgstr "Paylaşımlı servisleri yapılandırın" - -msgid "Configuring wsrep replication" -msgstr "Wsrep yedeklemenin yapılandırılması" - -msgid "" -"Connect an additional quorum device to allow small clusters remain quorate " -"during node outages" -msgstr "" -"Düğüm arızalarında küçük kümelerin yeterli sayıda kalması için ek bir " -"yetersayı aygıtı bağla" - -msgid "" -"Consider that, while exchanges and bindings survive the loss of individual " -"nodes, queues and their messages do not because a queue and its contents are " -"located on one node. If we lose this node, we also lose the queue." -msgstr "" -"Değişimler ve bağlar tek düğümlerin kaybını kaldırabilse de, kuyruklar ve " -"iletileri kaldıramaz çünkü bir kuyruk ve içerikleri bir düğüm üzerinde " -"bulunur. Bu düğümü kaybedersek, kuyruğu da kaybederiz." - -msgid "Contents" -msgstr "İçindekiler" - -msgid "" -"Corosync can be configured to work with either multicast or unicast IP " -"addresses or to use the votequorum library." -msgstr "" -"Corosync çoklu yayın ya da tekli yayın IP adresleriyle veya votequorum " -"kitaplığını kullanacak şekilde yapılandırılabilir." - -msgid "" -"Corosync is started as a regular system service. Depending on your " -"distribution, it may ship with an LSB init script, an upstart job, or a " -"Systemd unit file." -msgstr "" -"Corosync düzenli bir sistem servisi olarak başlatılır. Dağıtımınıza göre, " -"bir LSB başlangıç betiği, upstart işi veya Systemd birim dosyası olabilir." 
- -msgid "" -"Create a configuration file for ``clustercheck`` at ``/etc/sysconfig/" -"clustercheck``:" -msgstr "" -"``clustercheck`` için ``/etc/sysconfig/clustercheck`` konumunda bir " -"yapılandırma dosyası oluşturun:" - -msgid "" -"Create a configuration file for the HAProxy monitor service, at ``/etc/" -"xinetd.d/galera-monitor``:" -msgstr "" -"``/etc/xinetd.d/galera-monitor`` konumunda HAProxy izleme servisi için " -"yapılandırma dosyası oluşturun:" - -msgid "" -"Create a symbolic link for the database server in the ``disable`` directory:" -msgstr "" -"``disable`` dizininde veritabanı servisi için bir sembolik bağlantı " -"oluşturun:" - -msgid "" -"Create and name the cluster. Then, start it and enable all components to " -"auto-start at boot time:" -msgstr "" -"Kümeyi oluşturun ve isimlendirin. Ardından başlatın ve tüm bileşenleri " -"önyükleme zamanında otomatik başlayacak şekilde etkinleştirin:" - -msgid "Create the Block Storage API endpoint with this IP." -msgstr "Bu IP ile Blok Depolama API uç noktasını oluşturun." - -msgid "Create the OpenStack Identity Endpoint with this IP address." -msgstr "Bu IP adresiyle OpenStack Kimlik Uç Noktasını oluşturun." - -msgid "Current upstream work" -msgstr "Mevcut yapılan iş" - -msgid "" -"Data integrity through fencing (a non-responsive process does not imply it " -"is not doing anything)" -msgstr "" -"Parmaklık ile veri bütünlüğü (yanıt vermeyen bir süreç hiçbir şey " -"yapmadığını ima etmez)" - -msgid "Data loss: Accidental deletion or destruction of data." -msgstr "Veri kaybı: Verinin kazayla silinmesi veya bozulması." - -msgid "Database (Galera Cluster) for high availability" -msgstr "Yüksek kullanılırlık için veritabanı (Galera Kümesi)" - -msgid "Database configuration" -msgstr "Veritabanı yapılandırması" - -msgid "Database hosts with Galera Cluster installed" -msgstr "Galera Kümesi yüklü veritabanı sunucuları" - -msgid "" -"Define the InnoDB memory buffer pool size. The default value is 128 MB, but " -"to compensate for Galera Cluster's additional memory usage, scale your usual " -"value back by 5%:" -msgstr "" -"InnoDB bellek tampon havuz boyutunu belirtin. Öntanımlı değer 128 MB'dir, " -"ama Galera Kümesinin ek hafıza kullanımına yer açmak için genel değerinizi " -"5% geri ölçekleyin:" - -msgid "Deployment flavors" -msgstr "Dağıtım nitelikleri" - -msgid "Deployment strategies" -msgstr "Kurulum stratejileri" - -msgid "Description" -msgstr "Açıklama" - -msgid "" -"Do not change this value. Other modes may cause ``INSERT`` statements on " -"tables with auto-increment columns to fail as well as unresolved deadlocks " -"that leave the system unresponsive." -msgstr "" -"Bu değeri değiştirmeyin. Diğer kipler otomatik artırmalı sütunlara sahip " -"tablolardaki ``INSERT`` işlemlerinin başarısız olmasına ve sistemi yanıtsız " -"bırakan çözülmemiş ölükilitlere yol açabilir." - -msgid "Download the resource agent to your system:" -msgstr "Kaynak aracısını sisteminize indirin:" - -msgid "" -"Each configured interface must have a unique ``ringnumber``, starting with 0." -msgstr "" -"Yapılandırılan her arayüzün 0 ile başlayan, benzersiz bir ``ringnumber`` " -"değeri olmalıdır." - -msgid "Each instance has its own IP address:" -msgstr "Her bir sunucu kendi IP adresine sahiptir:" - -msgid "" -"Each instance of HAProxy configures its front end to accept connections only " -"to the virtual IP (VIP) address. The HAProxy back end (termination point) is " -"a list of all the IP addresses of instances for load balancing." 
-msgstr "" -"Her bir HAProxy ön yüzünü yalnızca sanal IP (VIP) adresinden bağlantı kabul " -"edecek şekilde yapılandırır. HAProxy arka ucu (sonlanma noktası) yük " -"dengeleme için olan sunucuların IP adresi listesidir." - -msgid "" -"Each service also has a backup but manages both the main and redundant " -"systems concurrently. This way, if there is a failure, the user is unlikely " -"to notice. The backup system is already online and takes on increased load " -"while the main system is fixed and brought back online." -msgstr "" -"Her servis ayrıca bir yedeğe sahiptir ama hem ana hem yedek sistemleri aynı " -"zamanda yönetir. Bu yolla, bir arıza olduğunda kullanıcı fark etmeyecektir " -"bile. Yedek sistem zaten çevrimiçidir ve ana sistem düzeltilip tekrar " -"çevrimiçi yapılana kadar artan yükü kaldıracaktır." - -msgid "" -"Edit the :file:`/etc/glance/glance-api.conf` file to configure the OpenStack " -"Image service:" -msgstr "" -":file:`/etc/glance/glance-api.conf` dosyasını düzenleyerek OpenStack İmaj " -"servisini yapılandırın:" - -msgid "Edit the :file:`/etc/manila/manila.conf` file:" -msgstr ":file:`/etc/manila/manila.conf` dosyasını düzenleyin:" - -msgid "" -"Edit the :file:`keystone.conf` file to change the values of the :manpage:" -"`bind(2)` parameters:" -msgstr "" -":manpage:`bind(2)` parametrelerinin değerlerini değiştirmek için :file:" -"`keystone.conf` dosyasını düzenleyin:" - -msgid "" -"Edit the ``/etc/cinder/cinder.conf`` file. For example, on a RHEL-based " -"system:" -msgstr "" -"``/etc/cinder/cinder.conf`` dosyasını düzenleyin. Örneğin RHEL tabanlı " -"sistemlerde:" - -msgid "Enhanced failure detection" -msgstr "Gelişmiş arıza algılama" - -msgid "" -"Ensure that the InnoDB locking mode for generating auto-increment values is " -"set to ``2``, which is the interleaved locking mode:" -msgstr "" -"Otomatik artırma değerleri üretmek için InnoDB kilitleme kipinin sıralı " -"birleştirme kilit kipi olan ``2`` olarak ayarlandığından emn olun:" - -msgid "" -"Ensure that the InnoDB log buffer is written to file once per second, rather " -"than on each commit, to improve performance:" -msgstr "" -"Başarımı artırmak için InnoDB kayıt tampon belleğinin dosyaya her gönderide " -"değil saniyede bir yazıldığına emin olun:" - -msgid "" -"Ensure that the binary log format is set to use row-level replication, as " -"opposed to statement-level replication:" -msgstr "" -"İkilik kayıt biçiminin açıklama-seviyesinde değil satır-seviyesinde " -"yedekleme kullanacak şekilde yapılandırıldığından emin olun:" - -msgid "" -"Ensure that the database server is not bound only to the localhost: " -"``127.0.0.1``. Also, do not bind it to ``0.0.0.0``. Binding to the localhost " -"or ``0.0.0.0`` makes ``mySQL`` bind to all IP addresses on the machine, " -"including the virtual IP address causing ``HAProxy`` not to start. Instead, " -"bind to the management IP address of the controller node to enable access by " -"other nodes through the management network:" -msgstr "" -"Veritabanı sunucusunun yalnızca localhost: ``127.0.0.1`` adresine bağlı " -"olmadığından emin olun. Ayrıca ``0.0.0.0`` adresine de bağlamayın. localhost " -"veya ``0.0.0.0`` adresine bağlamak ``mySQL``in makinedeki tüm IP adreslerine " -"bağlanması demektir, buna sanal IP adresi de dahildir bu da ``HAProxy``nin " -"başlamamasına sebep olur. 
Bunun yerine kontrol düğümünün yönetim IP adresine " -"bağlayarak diğer düğümlerden erişimi yönetim ağı üzerinden etkinleştirin:" - -msgid "Ensure that the default storage engine is set to InnoDB:" -msgstr "Öntanımlı depolama motorunun InnoDB'ye ayarlandığından emin olun:" - -msgid "" -"Ensure your HAProxy installation is not a single point of failure, it is " -"advisable to have multiple HAProxy instances running." -msgstr "" -"HAProxy kurulumunuzun tek bir kırılma noktası olmadığından emin olun, birden " -"çok HAProxy sunucusu çalıştırılması önerilir." - -msgid "" -"Ephemeral storage is allocated for an instance and is deleted when the " -"instance is deleted. The Compute service manages ephemeral storage and by " -"default, Compute stores ephemeral drives as files on local disks on the " -"compute node. As an alternative, you can use Ceph RBD as the storage back " -"end for ephemeral storage." -msgstr "" -"Geçici depolama sunucu için ayrılır ve sunucu silindiğinde silinir. Geçici " -"depolamayı Hesaplama servisi yönetir, ve öntanımlı olarak Hesaplama geçici " -"sürücüleri hesaplama düğümündeki yerel disklerde dosya olarak tutar. " -"Alternatif olarak, geçici depolama için arka uç olarak Ceph RBD " -"kullanabilirsiniz." - -msgid "" -"Even a distributed or replicated application that is able to survive " -"failures on one or more machines can benefit from a cluster manager because " -"a cluster manager has the following capabilities:" -msgstr "" -"Bir ya da daha fazla makinede arızaları kaldırabilen dağıtık ya da yedekli " -"bir uygulama bile bir küme yöneticiden faydalanabilir çünkü küme yönetici şu " -"yeteneklere sahiptir:" - -msgid "Existing solutions" -msgstr "Mevcut çözümler" - -msgid "Facility services such as power, air conditioning, and fire protection" -msgstr "Güç, havalandırma, yangın koruması gibi tesis servisleri" - -msgid "Firewall" -msgstr "Güvenlik duvarı" - -msgid "" -"For Liberty, you can not have the standalone network nodes. The Networking " -"services are run on the controller nodes. In this guide, the term `network " -"nodes` is used for convenience." -msgstr "" -"Liberty için, bağımsız ağ düğümlerine sahip olamazsınız. Ağ servisleri " -"kontrol düğümlerinde çalışır. Bu kılavuzda, kolaylık olması için `ağ " -"düğümleri` terimi kullanılmıştır." - -msgid "" -"For OpenStack Compute, (if your OpenStack Identity service IP address is " -"10.0.0.11) use the following configuration in the :file:`api-paste.ini` file:" -msgstr "" -"OpenStack Hesaplama için, (OpenStack Kimlik servisi IP adresiniz 10.0.0.11 " -"ise) :file:`api-paste.ini` dosyasında şu yapılandırmayı kullanın:" - -msgid "For RHEL, Fedora, or CentOS:" -msgstr "RHEL, Fedora veya CentOS için:" - -msgid "" -"For Red Hat Enterprise Linux and Red Hat-based Linux distributions, the " -"following process uses Systemd unit files." -msgstr "" -"Red Hat Kurumsal Linux ve Red Hat tabanlı Linux dağıtımları için, aşağıdaki " -"süreç Systemd birim dosyaları kullanır." - -msgid "" -"For SLES 12, the packages are signed by GPG key 893A90DAD85F9316. You should " -"verify the fingerprint of the imported GPG key before using it." -msgstr "" -"SLES 12 için, paketler GPG anahtarı 893A90DAD85F9316 ile imzalanmıştır. " -"Kullanmadan önce içe aktarılan GPG anahtarının parmakizini doğrulamalısınız." - -msgid "For SLES 12:" -msgstr "SLES 12 için:" - -msgid "" -"For UDPU, every node that should be a member of the membership must be " -"specified." -msgstr "UDPU için, aboneliğin üyesi olması gereken her düğüm belirtilmelidir." 
- -msgid "" -"For Ubuntu 16.04.1: Create a configuration file for ``clustercheck`` at ``/" -"etc/default/clustercheck``." -msgstr "" -"Ubuntu 16.04.1 için: ``/etc/default/clustercheck`` konumunda " -"``clustercheck`` için bir yapılandırma dosyası oluşturun." - -msgid "For Ubuntu or Debian:" -msgstr "Ubuntu veya Debian için:" - -msgid "" -"For Ubuntu, you should also enable the Corosync service in the ``/etc/" -"default/corosync`` configuration file." -msgstr "" -"Ubuntu için, ayrıca ``/etc/default/corosync`` yapılandırma dosyasında " -"Corosync servisini etkinleştirmelisiniz." - -msgid "" -"For `Fedora `_" -msgstr "" -"`Fedora `_ için" - -msgid "" -"For `Ubuntu `_" -msgstr "" -"`Ubuntu `_ için" - -msgid "For ``crmsh``:" -msgstr "``crmsh`` için:" - -msgid "For ``pcs``:" -msgstr "``pcs`` için:" - -msgid "" -"For a complete list of the available parameters, run the ``SHOW VARIABLES`` " -"command from within the database client:" -msgstr "" -"Kullanılabilir parametrelerin tam listesi için, veritabanı istemcisinden " -"``SHOW VARIABLES`` komutunu çalıştırın:" - -msgid "" -"For backward compatibility and supporting existing deployments, the central " -"agent configuration supports using different configuration files. This is " -"for groups of service instances that are running in parallel. For enabling " -"this configuration, set a value for the ``partitioning_group_prefix`` option " -"in the `polling section `_ in the OpenStack Configuration " -"Reference." -msgstr "" -"Geriye uyumluluk ve mevcut kurulumlara destek için, merkezi aracı " -"yapılandırması farklı yapılandırma dosyalarının kullanımını destekler. Bu " -"paralel olarak çalışan servis sunucu grupları içindir. Bu yapılandırmayı " -"etkinleştirmek için, OpenStack Yapılandırma Kılavuzunda `yoklama kısmındaki " -"`_ ``partitioning_group_prefix`` seçeneği için bir değer " -"ayarlayın." - -msgid "" -"For demonstrations and studying, you can set up a test environment on " -"virtual machines (VMs). This has the following benefits:" -msgstr "" -"Gösterim çalışma amaçlı, sanal makineler (VM) üzerine bir deneme ortamı " -"kurabilirsiniz. Bunun aşağıdaki faydaları bulunur:" - -msgid "" -"For detailed instructions about installing HAProxy on your nodes, see the " -"HAProxy `official documentation `_." -msgstr "" -"Düğümlerinize HAProxy kurmak hakkında ayrıntılı bilgi için, HAProxy `resmi " -"belgelendirmesine `_ göz atın." - -msgid "" -"For documentation about these parameters, ``wsrep`` provider option, and " -"status variables available in Galera Cluster, see the Galera cluster " -"`Reference `_." -msgstr "" -"Bu parametrelerle ilgili belgelendirme, ``wsrep`` sağlayıcı seçeneği, ve " -"Galera Kümesindeki kullanılabilir durum değişkenleri için, Galera küme " -"`Başvurusuna`_ göz atın." - -msgid "" -"For each sub-group of the central agent pool with the same " -"``partitioning_group_prefix``, a disjoint subset of meters must be polled to " -"avoid samples being missing or duplicated. The list of meters to poll can be " -"set in the :file:`/etc/ceilometer/pipeline.yaml` configuration file. For " -"more information about pipelines see the `Data processing and pipelines " -"`_ " -"section." -msgstr "" -"Aynı ``partitioning_group_prefix`` değerine sahip merkezi aracı havuzu alt " -"grubu için, örneklerin kayıp ya da kopya olmasını engellemek için " -"ölçümlerin ayrık alt kümeler yoklanmalıdır. Yoklanacak ölçümlerin listesi :" -"file:`/etc/ceilometer/pipeline.yaml` yapılandırma dosyasında ayarlanabilir. 
" -"Yönlendirmelerle ilgili daha fazla bilgi için `Veri işleme ve yönlendirme " -"`_ " -"kısmına göz atın." - -msgid "" -"For environments that do not support multicast, Corosync should be " -"configured for unicast. An example fragment of the :file:`corosync.conf` " -"file for unicastis is shown below:" -msgstr "" -"Çoklu yayın desteklemeyen ortamlar için, Corosync tekli yayın için " -"yapılandırılmalıdır. Tekli yayınlar için ayarlanmış :file:`corosync.conf` " -"dosyasının örnek bir parçası aşağıdadır:" - -msgid "" -"For example, if your OpenStack Image API service IP address is 10.0.0.11 (as " -"in the configuration explained here), you would use the following " -"configuration in your :file:`nova.conf` file:" -msgstr "" -"Örneğin, OpenStack İmaj API servisi IP adresiniz 10.0.0.11 ise (burada " -"açıklanan yapılandırmadaki gibi) :file:`nova.conf` dosyanızda aşağıdaki " -"yapılandırmayı kullanırdınız:" - -msgid "" -"For example, in a seven-node cluster, the quorum should be set to " -"``floor(7/2) + 1 == 4``. If quorum is four and four nodes fail " -"simultaneously, the cluster itself would fail, whereas it would continue to " -"function, if no more than three nodes fail. If split to partitions of three " -"and four nodes respectively, the quorum of four nodes would continue to " -"operate the majority partition and stop or fence the minority one (depending " -"on the no-quorum-policy cluster configuration)." -msgstr "" -"Örneğin yedi düğümlük bir kümede, yetersayı ``taban(7/2) + 1 == 4`` olarak " -"ayarlanmalıdır. Yetersayı dört ise ve dört düğüm aynı anda arızalanırsa, " -"küme de başarısız olur, üç düğümden fazlası arızalanmazsa çalışmaya devam " -"eder. Eğer üç ve dört düğüm olarak parçalandıysa, dört düğümlük yetersayı " -"ile çoğunluğun olduğu kısımda işlemeye devam edecektir ve azınlıkta " -"kalanları durduracak ya da yalıtacaktır (no-quorum-policy küme " -"yapılandırmasına dayanarak)." - -msgid "" -"For example, you may enter ``edit p_ip_glance-api`` from the :command:`crm " -"configure` menu and edit the resource to match your preferred virtual IP " -"address." -msgstr "" -"Örneğin :command:`crm configure` menüsünden ``edit p_ip_glance-api`` girip " -"kaynağı düzenleyerek tercih ettiğiniz sanal IP adresi ile " -"eşleştirebilirsiniz." - -msgid "" -"For example, you may enter ``edit p_ip_keystone`` from the :command:`crm " -"configure` menu and edit the resource to match your preferred virtual IP " -"address." -msgstr "" -"Örneğin, :command:`crm configure` menüsünden ``edit p_ip_keystone`` girmeniz " -"ve kaynağı tercih edilen sanal IP adresiyle eşleşecek şekilde düzenlemeniz " -"gerekebilir." - -msgid "" -"For example, you may enter ``edit p_ip_manila-api`` from the :command:`crm " -"configure` menu and edit the resource to match your preferred virtual IP " -"address." -msgstr "" -"Örneğin :command:`crm configure` menüsünden ``edit p_ip_manila-api`` " -"girebilir ve kaynağı tercih ettiğiniz sanal IP adresi ile eşleşecek şekilde " -"düzenleyebilirsiniz." - -msgid "" -"For firewall configurations, Corosync communicates over UDP only, and uses " -"``mcastport`` (for receives) and ``mcastport - 1`` (for sends)." -msgstr "" -"Güvenlik duvarı yapılandırmaları için, Corosync yalnızca UDP üzerinden " -"iletişim kurar, ve ``mcastport`` (alıcılar için) ve ``mcastport - 1`` " -"(göndericiler için) kullanır." 
- -msgid "" -"For information about the required configuration options to set in the :file:" -"`ceilometer.conf`, see the `coordination section `_ in the OpenStack Configuration " -"Reference." -msgstr "" -":file:`ceilometer.conf` dosyasında ayarlanacak gerekli yapılandırma " -"seçenekleri hakkında bilgi için, OpenStack Yapılandırma Başvurusundaki " -"`eşgüdüm kısmına `_ göz atın." - -msgid "" -"For more information about configuring storage back ends for the different " -"storage options, see `Manage volumes `_ in the OpenStack Administrator Guide." -msgstr "" -"Farklı depolama seçenekleri için depolama arka uçları yapılandırmayla ilgili " -"daha fazla bilgi için, OpenStack Yönetici Kılavuzundaki `Birimlerin " -"yönetimine `_ göz atın." - -msgid "" -"For more information on configuring SELinux to work with Galera Cluster, see " -"the `SELinux Documentation `_" -msgstr "" -"SELinux'u Galera Kümesi ile çalışacak şekilde yapılandırmayla ilgili daha " -"fazla bilgi için `SELinux Belgelendirmesine `_ göz atın" - -msgid "" -"For more information on firewalls, see `firewalls and default ports `_ in OpenStack " -"Administrator Guide." -msgstr "" -"Güvenlik duvarlarıyla ilgili daha fazla bilgi için, OpenStack Yönetici " -"Kılavuzunda `güvenlik duvarları ve öntanımlı bağlantı noktalarına `_ göz atın." - -msgid "" -"For more information, see the official installation manual for the " -"distribution:" -msgstr "Daha fazla bilgi için, dağıtım için resmi kurulum kılavuzuna göz atın:" - -msgid "For openSUSE:" -msgstr "openSUSE için:" - -msgid "For servers that use ``systemd``, run the following command:" -msgstr "``systemd`` kullanan sunucularda şu komutu çalıştırın:" - -msgid "For servers that use ``systemd``, run the following commands:" -msgstr "``systemd`` kullanan sunucular için, aşağıdaki komutları çalıştırın:" - -msgid "" -"For these reasons, we highly recommend the use of a cluster manager like " -"`Pacemaker `_." -msgstr "" -"Bu sebeplerle, `Pacemaker `_ gibi bir küme " -"yöneticiyi kesinlikle öneriyoruz." - -msgid "" -"For this reason, each cluster in a high availability environment should have " -"an odd number of nodes and the quorum is defined as more than a half of the " -"nodes. If multiple nodes fail so that the cluster size falls below the " -"quorum value, the cluster itself fails." -msgstr "" -"Bu sebeple, yüksek kullanılır bir ortamdaki her bir küme tek sayıda düğüme " -"sahip olmalı ve yetersayı düğümlerin yarısından fazlası olarak " -"tanımlanmalıdır. Birden çok düğüm arızalanır ve küme boyutu yetersayı " -"boyutunun altına düşerse, küme kendisi başarısız olur." - -msgid "" -"Galera Cluster configuration parameters all have the ``wsrep_`` prefix. You " -"must define the following parameters for each cluster node in your OpenStack " -"database." -msgstr "" -"Galera Kümesi yapılandırma parametrelerinin ``wsrep_`` öneki bulunur. " -"OpenStak veritabanınızdaki her küme düğümü için aşağıdaki parametreleri " -"tanımlamalısınız." - -msgid "" -"Galera Cluster does not support non-transactional storage engines and " -"requires that you use InnoDB by default. There are some additional " -"parameters that you must define to avoid conflicts." -msgstr "" -"Galera Kümesi aktarımsal olmayan depolama motorlarını desteklemez ve " -"öntanımlı olarak InnoDB kullanmanızı gerektirir. Çakışmaları önlemek için " -"tanımlamanız gereken bazı ek parametreler bulunur." 
- -msgid "" -"Galera Cluster requires that you open the following ports to network traffic:" -msgstr "" -"Galera Kümesi aşağıdaki bağlantı noktalarını ağ trafiğine açmanızı " -"gerektirir:" - -msgid "Galera can be configured using one of the following strategies:" -msgstr "Galera aşağıdaki stratejilerden biri kullanılarak yapılandırılabilir:" - -msgid "Galera runs behind HAProxy:" -msgstr "Galera HAProxy arkasında çalışır:" - -msgid "" -"Galera synchronous replication guarantees a zero slave lag. The failover " -"procedure completes once HAProxy detects that the active back end has gone " -"down and switches to the backup one, which is then marked as ``UP``. If no " -"back ends are ``UP``, the failover procedure finishes only when the Galera " -"Cluster has been successfully reassembled. The SLA is normally no more than " -"5 minutes." -msgstr "" -"Galera eşzamanlı çoğaltma sıfır gecikmeli köleler garantiler. Arıza geçiş " -"yöntemi HAProxy etkin arka ucun gittiğini algılayıp yedek olana geçtiğinde " -"tamamlanır, ardından yeni etkin ``AÇIK`` olarak işaretlenir. Hiçbir arka uç " -"``AÇIK`` değilse arıza geçiş yöntemi yalnızca Galera Kümesi yeniden bir " -"araya geldiğinde biter. SLA normalde 5 dakikadan fazla değildir." - -msgid "HAProxy" -msgstr "HAProxy" - -msgid "" -"HAProxy load balances incoming requests and exposes just one IP address for " -"all the clients." -msgstr "" -"HAProxy gelen isteklerin yükünü dengeler ve istemciler için yalnızca bir IP " -"adresi sunar." - -msgid "" -"HAProxy provides a fast and reliable HTTP reverse proxy and load balancer " -"for TCP or HTTP applications. It is particularly suited for web crawling " -"under very high loads while needing persistence or Layer 7 processing. It " -"realistically supports tens of thousands of connections with recent hardware." -msgstr "" -"HAProxy TCP ve HTTP uygulamalar için hızlı ve güvenilir HTTP ters vekili " -"sağlar. Kalıcılık ve Katman 7 işleme ihtiyacı duyulan çok yüksek yüklerde " -"web yürütmelerine özellikle uygundur. Güncel donanımla gerçekçi bir şekilde " -"on binlerce bağlantıyı destekler." - -msgid "Hardware considerations for high availability" -msgstr "Yüksek kullanılırlık için donanım etmenleri" - -msgid "Hardware setup" -msgstr "Donanım kurulumu" - -msgid "" -"High availability is implemented with redundant hardware running redundant " -"instances of each service. If one piece of hardware running one instance of " -"a service fails, the system can then failover to use another instance of a " -"service that is running on hardware that did not fail." -msgstr "" -"Yüksek kullanılırlık her servisin yedeğini çalıştıran yedekli donanım ile " -"uygulanır. Bir servisi çalıştıran bir donanım parçası arızalanırsa, sistem " -"arızalanmayan donanımda çalışan servisi kullanacak şekilde geçiş yapabilir." - -msgid "" -"High availability is not for every user. It presents some challenges. High " -"availability may be too complex for databases or systems with large amounts " -"of data. Replication can slow large systems down. Different setups have " -"different prerequisites. Read the guidelines for each setup." -msgstr "" -"Yüksek kullanılırlık her kullanıcı için değildir. Bazı zorlukları " -"beraberinde getirir. Yüksek kullanılırlık veritabanları veya büyük miktarda " -"veriye sahip sistemlerde çok karmaşık olabilir. Çoğaltma işlemi büyük " -"sistemleri yavaşlatabilir. Farklı kurulumların farklı ön gereksinimleri " -"bulunur. Her bir kurulum için kılavuzları okuyun." 
- -msgid "High availability is turned off as the default in OpenStack setups." -msgstr "" -"OpenStack kurulumlarında yüksek kullanılırlık öntanımlı olarak kapalıdır." - -msgid "High availability systems seek to minimize the following issues:" -msgstr "" -"Yüksek kullanılabilir sistemler şu sorunları asgariye indirmeye çalışır:" - -msgid "" -"High availability systems typically achieve an uptime percentage of 99.99% " -"or more, which roughly equates to less than an hour of cumulative downtime " -"per year. In order to achieve this, high availability systems should keep " -"recovery times after a failure to about one to two minutes, sometimes " -"significantly less." -msgstr "" -"Yüksek kullanılır sistemler genellikle çalışma zamanı yüzdesini 99.99% veya " -"daha yukarıda tutar, bunun anlamı yılda bir saatten az kapalı kalma " -"süresidir. Bunu elde etmek için, yüksek kullanılabilir sistemler arıza " -"sonrası kurtarma sürelerini bir iki dakikada, bazen daha da aşağıda " -"tutmalıdırlar." - -msgid "Highly available Block Storage API" -msgstr "Yüksek kullanılır Blok Depolama API'si" - -msgid "Highly available Identity API" -msgstr "Yüksek kullanılırlıklı Kimlik API'si" - -msgid "Highly available Image API" -msgstr "Yüksek kullanılır İmaj API'si" - -msgid "Highly available Shared File Systems API" -msgstr "Yüksek kullanılır Paylaşımlı Dosya Sistemi API'si" - -msgid "Highly available Telemetry" -msgstr "Yüksek kullanılır Telemetri" - -msgid "How long to back-off for between retries when connecting to RabbitMQ:" -msgstr "" -"RabbitMQ'ya bağlanırken yeniden denemeler arasında ne kadar bekleneceği:" - -msgid "" -"However, running an OpenStack environment on VMs degrades the performance of " -"your instances, particularly if your hypervisor or processor lacks support " -"for hardware acceleration of nested VMs." -msgstr "" -"Ancak, bir OpenStack ortamını sanal makineler üzerinde çalıştırmak " -"sunucularınızın başarımını azaltır, özellikle hipervizörünüz veya işlemciniz " -"içiçe sanal makinler için donanım hızlandırması desteğine sahip değilse." - -msgid "" -"If the Block Storage service runs on the same nodes as the other services, " -"then it is advisable to also include:" -msgstr "" -"Blok Depolama servisi diğer servislerle aynı düğümlerde çalışırsa, ayrıca " -"şunları içermesi de tavsiye edilir:" - -msgid "" -"If the Identity service will be sending ceilometer notifications and your " -"message bus is configured for high availability, you will need to ensure " -"that the Identity service is correctly configured to use it. For details on " -"how to configure the Identity service for this kind of deployment, see :doc:" -"`shared-messaging`." -msgstr "" -"Kimlik servisi ceilometer bildirilerini gönderecekse ve ileti yolunuz yüksek " -"kullanılırlık için yapılandırılmışsa, Kimlik servisinin bunu kullanacak " -"şekilde düzgün yapılandırıldığından emin olmalısınız. Kimlik servisinin bu " -"tür bir kurulum için nasıl yapılandırılacağıyla ilgili ayrıntılar için bknz :" -"doc:`shared-messaging`." - -msgid "" -"If the ``broadcast`` parameter is set to ``yes``, the broadcast address is " -"used for communication. If this option is set, the ``mcastaddr`` parameter " -"should not be set." -msgstr "" -"``broadcast`` parametresi ``yes`` olarak ayarlanırsa, yayın adresi iletişim " -"için kullanılır. Bu seçenek ayarlıysa, ``mcastaddr`` parametresi " -"ayarlanmamalıdır." - -msgid "" -"If the cluster is working, you can create usernames and passwords for the " -"queues." 
-msgstr "" -"Küme çalışıyorsa, kuyruklar için kullanıcı adları ve parolalar " -"oluşturabilirsiniz." - -msgid "" -"If you are using Corosync version 2 on Ubuntu 14.04, remove or comment out " -"lines under the service stanza. These stanzas enable Pacemaker to start up. " -"Another potential problem is the boot and shutdown order of Corosync and " -"Pacemaker. To force Pacemaker to start after Corosync and stop before " -"Corosync, fix the start and kill symlinks manually:" -msgstr "" -"Ubuntu 14.04 üzerinde Corosync sürüm 2 kullanıyorsanız, servis kıtasındaki " -"satırları silin veya yorum satırına alın. Bu kıtalar Pacemaker'in " -"başlayışını etkinleştirir. Potansiyel başka bir problem Corosync ve " -"Pacemaker'in önyükleniş ve kapanış sıralarıyla ilgilidir. Pacemaker'in " -"Corosync'den sonra başlamasını ve Corosync'den önce durmasını sağlamak için, " -"başlama ve öldürme sembolik bağlarını elle düzeltin:" - -msgid "" -"If you are using Corosync version 2, use the :command:`corosync-cmapctl` " -"utility instead of :command:`corosync-objctl`; it is a direct replacement." -msgstr "" -"Corosync sürüm 2 kullanıyorsanız, :command:`corosync-objctl`; yerine :" -"command:`corosync-cmapctl` aracını kullanın, doğrudan yerine geçer." - -msgid "" -"If you are using both private and public IP addresses, create two virtual IP " -"addresses and define the endpoint. For example:" -msgstr "" -"Hem özel hem açık IP adresleri kullanıyorsanız, iki sanal IP adresi " -"oluşturun ve uç noktayı tanımlayın. Örneğin:" - -msgid "" -"If you are using both private and public IP addresses, create two virtual " -"IPs and define your endpoint. For example:" -msgstr "" -"Hep gizli hem açık IP adresleri kullanıyorsanız, iki sanal IP oluşturun ve " -"uç noktanızı tanımlayın. Örneğin:" - -msgid "" -"If you are using both private and public IP addresses, you should create two " -"virtual IPs and define your endpoints like this:" -msgstr "" -"Hem özel hem açık IP adresleri kullanıyorsanız, iki sanal IP tanımlamalı ve " -"uç noktalarınızı şu şekilde tanımlamalısınız:" - -msgid "" -"If you are using the Block Storage service OCF agent, some settings will be " -"filled in for you, resulting in a shorter configuration file:" -msgstr "" -"Blok Depolama servisi OCF aracısını kullanıyorsanız, bazı ayarlar sizin için " -"doldurulacaktır, bu da daha kısa bir yapılandırma dosyasına sebep olur." - -msgid "" -"If you are using the horizon Dashboard, edit the :file:`local_settings.py` " -"file to include the following:" -msgstr "" -"Horizon Kontrol Panelini kullanıyorsanız, :file:`local_settings.py` " -"dosyasını aşağıdakileri içerecek şekilde düzenleyin:" - -msgid "" -"If you change the configuration from an old set-up that did not use HA " -"queues, restart the service:" -msgstr "" -"Yapılandırmayı HA kuyrukları kullanmayan eski bir kurulumdan " -"değiştirirseniz, servisi yeniden başlatın:" - -msgid "" -"If you use HAProxy as a load-balancing client to provide access to the " -"Galera Cluster, as described in the :doc:`controller-ha-haproxy`, you can " -"use the ``clustercheck`` utility to improve health checks." -msgstr "" -":doc:`controller-ha-haproxy` belgesinde tanımlandığı gibi Galera Kümesine " -"erişim sağlamak için yük dengeleyen istemci olarak HAProxy kullanıyorsanız, " -"sağlık kontrollerini iyileştirmek için ``clustercheck`` aracını " -"kullanabilirsiniz." - -msgid "" -"In Corosync, configurations use redundant networking (with more than one " -"interface). 
This means you must select a Redundant Ring Protocol (RRP) mode " -"other than none. We recommend ``active`` as the RRP mode." -msgstr "" -"Corosync'de, yapılandırmalar yedekli ağ kullanır (birden fazla arayüzle). " -"Bunun anlamı hiçbir şeyden ziyade Yedekli Halka İletişim Kuralı (RRP) kipini " -"seçmeniz gerekeceğidir. RRP kipi olarak ``etkin`` öneriyoruz." - -msgid "" -"In Red Hat Enterprise Linux or CentOS environments, this is a recommended " -"path to perform configuration. For more information, see the `RHEL docs " -"`_." -msgstr "" -"Red Hat Kurumsal Linux ve CentOS ortamlarında, bu yapılandırma yapmak için " -"tavsiye edilen yoldur. Daha fazla bilgi için `RHEL belgelerine `_ göz atın." - -msgid "" -"In a collapsed configuration, there is a single cluster of 3 or more nodes " -"on which every component is running." -msgstr "" -"Kapalı yapılandırmada, her birinde tüm bileşenlerin çalıştığı 3 ya da daha " -"fazla düğümden oluşan tek bir küme bulunur." - -msgid "" -"In addition to Galera Cluster, you can also achieve high availability " -"through other database options, such as PostgreSQL, which has its own " -"replication system." -msgstr "" -"Galera Kümesine ek olarak, kendi yedekleme sistemi olan PostgreSQL gibi " -"başka veritabanı seçenekleri ile de yüksek kullanılırlığı elde edebilirsiniz." - -msgid "" -"In general, we can divide all the OpenStack components into three categories:" -msgstr "Genelde, tüm OpenStack bileşenlerini üç kategoriye bölebiliriz:" - -msgid "" -"In the Galera Cluster, the Primary Component is the cluster of database " -"servers that replicate into each other. In the event that a cluster node " -"loses connectivity with the Primary Component, it defaults into a non-" -"operational state, to avoid creating or serving inconsistent data." -msgstr "" -"Galera Kümesinde, Birincil Bileşen birbiriyle yedekleme yapan veritabanı " -"sunucuları kümesidir. Bir küme düğümünün Birincil Bileşenle bağlantıyı " -"kaybettiği olaylarda, tutarsız veri oluşturmamak ya da sunmamak için " -"öntanımlı olarak işlevsiz bir duruma düşer." - -msgid "" -"In the event that a component fails and a back-up system must take on its " -"load, most high availability systems will replace the failed component as " -"quickly as possible to maintain necessary redundancy. This way time spent in " -"a degraded protection state is minimized." -msgstr "" -"Bir bileşenin arızalandığı ve yükünü yedek bir sistemin alması gerektiği " -"durumlarda, çoğu yüksek kullanılır sistem arızalı bileşeni olabildiğince " -"hızlı değiştirerek gerekli yedekliliği sağlar. Bu yolla kullanılamayan " -"koruma durumunda geçirilen zaman asgari düzeyde tutulur." - -msgid "" -"In the event that you need to restart any cluster node, you can do so. When " -"the database server comes back it, it establishes connectivity with the " -"Primary Component and updates itself to any changes it may have missed while " -"down." -msgstr "" -"Herhangi bir küme düğümünü yeniden başlatmanız gerekirse, yapabilirsiniz. " -"Veritabanı sunucusu geri geldiğinde, Birincil Bileşen ile bağlantıyı kurar " -"ve kapalıyken alamadığı tüm değişikliklerle kendini günceller." - -msgid "" -"In theory, you can run the Block Storage service as active/active. However, " -"because of sufficient concerns, we recommend running the volume component as " -"active/passive only." -msgstr "" -"Teoride Blok Depolama servisini etkin/etkin olarak çalıştırabilirsiniz. 
" -"Ancak birim bileşenini yalnızca etkin/pasif çalıştırmanızı önermek için " -"yeterli endişelerimiz var." - -msgid "" -"In this configuration, each service runs in a dedicated cluster of 3 or more " -"nodes." -msgstr "" -"Bu yapılandırmada, her servis 3 veya daha fazla düğümden oluşan adanmış bir " -"kümede çalışır. " - -msgid "" -"Individual cluster nodes can stop and be restarted without issue. When a " -"database loses its connection or restarts, the Galera Cluster brings it back " -"into sync once it reestablishes connection with the Primary Component. In " -"the event that you need to restart the entire cluster, identify the most " -"advanced cluster node and initialize the Primary Component on that node." -msgstr "" -"Bağımsız küme düğümleri durdurulabilir ve sorunsuzca yeniden başlatılabilir. " -"Bir veritabanı bağlantısını kaybettiğinde veya yeniden başladığında, Galera " -"Kümesi Birincil Bileşenle bağlantısını yeniden sağladığında tekrar onu " -"eşzamanlar. Tüm kümeyi yeniden başlatmanız gerekirse, en gelişmiş küme " -"düğümünü bulun ve Birincil Bİleşeni onun üzerinde başlatın." - -msgid "" -"Initialize the Primary Component on one cluster node. For servers that use " -"``init``, run the following command:" -msgstr "" -"Birincil Bileşeni bir küme düğümünde ilklendirin. ``init`` kullanan " -"sunucularda şu komutu çalıştırın:" - -msgid "Initializing the cluster" -msgstr "Kümenin başlatılması" - -msgid "Install RabbitMQ" -msgstr "RabbitMQ Kurulumu" - -msgid "Install packages" -msgstr "Paketleri kurun" - -msgid "Installing Memcached" -msgstr "Memcached Kurulumu" - -msgid "Installing the operating system" -msgstr "İşletim sisteminin kurulumu" - -msgid "Introduction to OpenStack high availability" -msgstr "OpenStack yüksek kullanılırlığa giriş" - -msgid "" -"It is also possible to follow a segregated approach for one or more " -"components that are expected to be a bottleneck and use a collapsed approach " -"for the remainder." -msgstr "" -"Ayrıca darboğaz oluşturması beklenen bileşenler bir ya da daha fazla bileşen " -"için ayrık bir yaklaşım ve geri kalanlar için kapalı yaklaşım tercih " -"edilebilir." - -msgid "" -"It is possible to add controllers to such an environment to convert it into " -"a truly highly available environment." -msgstr "" -"Böyle bir ortama denetleyiciler ekleyerek gerçekten yüksek kullanılabilir " -"bir ortama dönüştürmek mümkündür." - -msgid "" -"It is possible to deploy three different flavors of the Pacemaker " -"architecture. The two extremes are ``Collapsed`` (where every component runs " -"on every node) and ``Segregated`` (where every component runs in its own 3+ " -"node cluster)." -msgstr "" -"Pacemaker mimarisinin üç farklı niteliğini kurmak mümkündür. En uç ikisi " -"``Kapalı`` (her bileşen her düğümde çalışır) ve ``Ayrılmış``tır (her bileşen " -"kendi 3+ düğümlük kümesinde çalışır)." - -msgid "" -"It is storage and application-agnostic, and in no way specific to OpenStack." -msgstr "" -"Depolama ve uygulama bilgisi yoktur, herhangi bir yönden OpenStack'e özel " -"değildir." - -msgid "" -"It is very important that all members of the system share the same view of " -"who their peers are and whether or not they are in the majority. Failure to " -"do this leads very quickly to an internal `split-brain `_ state. This is where different parts of " -"the system are pulling in different and incompatible directions." 
-msgstr "" -"Sistemin tüm ögelerinin eşlerinin kim olduğuyla ve çoğunlukta olup " -"olmadıklarıyla ilgili aynı görüşe sahip olması çok önemlidir. Bunun " -"yapılmaması çabucak dahili bir `ayrık-beyin `_ durumuna yol açar. Bu sistemin farklı " -"parçalarının farklı ve uyumsuz yönlere çekmeye başladığı noktadır." - -msgid "List the nodes known to the quorum service" -msgstr "Yetersayı servisi tarafından bilinen düğümleri listele" - -msgid "Load distribution" -msgstr "Yük dağıtımı" - -msgid "" -"Locate your HAProxy instance on each OpenStack controller node in your " -"environment. The following is an example ``/etc/haproxy/haproxy.cfg`` " -"configuration file. Configure your instance using the following " -"configuration file, you will need a copy of it on each controller node." -msgstr "" -"Ortamınızdaki her bir OpenStack kontrol düğümündeki HAProxy sunucunuzu " -"bulun. Aşağıdaki bir ``/etc/haproxy/haproxy.cfg`` yapılandırma dosyası " -"örneğidir. Aşağıdaki yapılandırma dosyasıyla sunucunuzu yapılandırın, her " -"bir kontrol düğümünde bir kopyasına ihtiyacınız olacaktır." - -msgid "" -"Log in to the database client and grant the ``clustercheck`` user " -"``PROCESS`` privileges:" -msgstr "" -"Veritabanı istemcisine giriş yapın ve ``clustercheck`` kullanıcısına " -"``PROCESS`` yetkisi verin:" - -msgid "" -"Maintains a redundant instance that can be brought online when the active " -"service fails. For example, OpenStack writes to the main database while " -"maintaining a disaster recovery database that can be brought online if the " -"main database fails." -msgstr "" -"Etkin servis arızalandığında yedek bir sunucuyu ayağa kaldıracak şekilde " -"yönetir. Örneğin, OpenStack ana veritabanına yazarken ana veritabanı " -"arızalanırsa çevrimiçi yapılacak bir felaket durumu kurtarma veritabanı " -"yönetir." - -msgid "Make sure `pcs` is running and configured to start at boot time:" -msgstr "" -"`pcs`nin çalıştığına ve önyükleme aşamasında başlayacak şekilde " -"yapılandırıldığına emin olun:" - -msgid "" -"Make sure to save the changes once you are done. This will vary depending on " -"your distribution:" -msgstr "" -"İşiniz bittiğinde değişiklikleri kaydettiğinizden emin olun. 
Bu dağıtımınıza " -"göre değişiklik gösterecektir:" - -msgid "" -"Making the Block Storage (cinder) API service highly available in active/" -"active mode involves:" -msgstr "" -"Blok Depolama (cinder) API servisini etkin/etkin kipte yüksek kullanılır " -"yapmak şunları içerir:" - -msgid "" -"Making the Block Storage API service highly available in active/passive mode " -"involves:" -msgstr "" -"Blok Depolama API servisini etkin/pasif kipte yüksek kullanılabilir yapmak " -"şunları içerir:" - -msgid "" -"Making the OpenStack Identity service highly available in active and passive " -"mode involves:" -msgstr "" -"OpenStack Kimlik servisini etkin ve pasif kiplerde yüksek kullanılırlıklı " -"yapmak şunları içerir:" - -msgid "" -"Making the RabbitMQ service highly available involves the following steps:" -msgstr "RabbitMQ servisinin yüksek kullanılır olması şu adımları içerir:" - -msgid "" -"Making the Shared File Systems (manila) API service highly available in " -"active/passive mode involves:" -msgstr "" -"Paylaşımlı Dosya Sistemleri (manila) API servisini etkin/pasif kipte yüksek " -"kullanılır yapmak şunları içerir:" - -msgid "Management" -msgstr "Yönetim" - -msgid "" -"Managing the Block Storage API daemon with the Pacemaker cluster manager" -msgstr "Blok Depolama API artalan işini Pacemaker küme yöneticisi ile yönetmek" - -msgid "" -"Many services can act in an active/active capacity, however, they usually " -"require an external mechanism for distributing requests to one of the " -"available instances. The proxy server can serve this role." -msgstr "" -"Çoğu servis etkin/etkin kapasitede davranabilir, ancak genellikle istekleri " -"kullanılabilir sunuculardan birine dağıtabilecek harici bir mekanizmaya " -"ihtiyaç duyarlar. Vekil sunucu bu rolü üstlenebilir." - -msgid "Maximum number of network nodes to use for the HA router." -msgstr "HA yönlendirici için kullanılacak azami ağ düğümleri sayısı." - -msgid "" -"Maximum retries with trying to connect to RabbitMQ (infinite by default):" -msgstr "" -"RabbitMQ'ya bağlanmaya çalışırken kaç kere deneneceği (öntanımlı olarak " -"sonsuz):" - -msgid "Memcached" -msgstr "Memcached" - -msgid "" -"Memcached is a general-purpose distributed memory caching system. It is used " -"to speed up dynamic database-driven websites by caching data and objects in " -"RAM to reduce the number of times an external data source must be read." -msgstr "" -"Memcached genel amaçlı dağıtık hafıza önbellekleme sistemidir. Veri ve " -"nesneleri RAM'de tutarak harici bir veri kaynağının okunma sıklığını " -"düşürerek dinamik veritabanı güdümlü websitelerini hızlandırmak için " -"kullanılır." - -msgid "" -"Memcached is a memory cache demon that can be used by most OpenStack " -"services to store ephemeral data, such as tokens." -msgstr "" -"Memcached çoğu OpenStack servisi tarafından jetonlar gibi geçici verileri " -"tutmak için kullanılabilecek bir hafıza önbellekleme artalan işidir." - -msgid "" -"Memcached uses a timeout value, which should always be set to a value that " -"is higher than the heartbeat value set for Telemetry." -msgstr "" -"Memcached her zaman Telemetri için ayarlanan kalp atışı değerinden yüksek " -"olması gereken bir zaman aşımı değeri kullanır." - -msgid "Memory" -msgstr "Bellek" - -msgid "" -"Memory caching is managed by `oslo.cache `_. This " -"ensures consistency across all projects when using multiple Memcached " -"servers. 
The following is an example configuration with three hosts:" -msgstr "" -"Hafıza önbellekleme `oslo.cache `_ tarafından yönetilir. Bu " -"birden çok Memcached sunucusu kullanırken tüm projeler arasında tutarlılık " -"sağlar. Aşağıda üç sunuculu bir yapılandırma örneği görebilirsiniz:" - -msgid "Messaging service for high availability" -msgstr "Yüksek kullanılırlık için ileti servisi" - -msgid "" -"Minimum number of network nodes to use for the HA router. A new router can " -"be created only if this number of network nodes are available." -msgstr "" -"HA yönlendirici için kullanılacak asgari ağ düğümü sayısı. Yeni bir " -"yönlendirici ancak bu sayıda ağ düğümü kullanılabilirse oluşturulabilir." - -msgid "" -"Mirrored queues in RabbitMQ improve the availability of service since it is " -"resilient to failures." -msgstr "" -"RabbitMQ'de yansılı kuyruklar arızalara dayanıklı olduklarından servisin " -"kullanılabilirliğini artırır." - -msgid "Mixed" -msgstr "Karışık" - -msgid "MongoDB" -msgstr "MongoDB" - -msgid "" -"More details are available in the `user story `_ co-" -"authored by OpenStack's HA community and `Product Working Group `_ (PWG), where this feature is " -"identified as missing functionality in OpenStack, which should be addressed " -"with high priority." -msgstr "" -"Daha fazla ayrıntıyı OpenStack HA topluluğu ve `Ürün Çalışma Grubu `_ (PWG) tarafından ortak yazılan ve bu " -"özelliğin OpenStack'de eksik bir işlevsellik olduğunu, yüksek öncelikle ele " -"alınması gerektiğini bildiren `kullanıcı hikayesinde `_ bulabilirsiniz." - -msgid "More information is available in the RabbitMQ documentation:" -msgstr "RabbitMQ belgelendirmesinde daha fazla bilgi bulunur:" - -msgid "" -"Most OpenStack services can use Memcached to store ephemeral data such as " -"tokens. Although Memcached does not support typical forms of redundancy such " -"as clustering, OpenStack services can use almost any number of instances by " -"configuring multiple hostnames or IP addresses." -msgstr "" -"Çoğu OpenStack servisi jetonlar gibi geçici verileri saklamak için Memcached " -"kullanabilir. Memcached kümeleme gibi tipik yedekleme biçimlerini " -"desteklemese de, OpenStack servisleri çoklu makine adları veya IP adresleri " -"yapılandırarak istenen sayıda sunucuyu kullanabilir." - -msgid "" -"Most distributions ship an example configuration file (:file:`corosync.conf." -"example`) as part of the documentation bundled with the Corosync package. An " -"example Corosync configuration file is shown below:" -msgstr "" -"Çoğu dağıtım Corosync paketiyle beraber bir yapılandırma dosyası (:file:" -"`corosync.conf.example`) dağıtır. Örnek bir Corosync yapılandırma dosyası " -"aşağıda gösterilmiştir:" - -msgid "" -"Most high availability systems fail in the event of multiple independent " -"(non-consequential) failures. In this case, most implementations favor " -"protecting data over maintaining availability." -msgstr "" -"Çoğu yüksek kullanılabilir sistem birden fazla bağımsız arıza durumunda " -"başarısız olur (takip etmeyen). Bu durumda çoğu uygulama kullanılabilirliği " -"sağlamaktan ziyade veriyi korumayı ön plana alır." - -msgid "" -"Most high availability systems guarantee protection against system downtime " -"and data loss only in the event of a single failure. However, they are also " -"expected to protect against cascading failures, where a single failure " -"deteriorates into a series of consequential failures. 
Many service providers " -"guarantee a :term:`Service Level Agreement (SLA)` including uptime " -"percentage of computing service, which is calculated based on the available " -"time and system downtime excluding planned outage time." -msgstr "" -"Çoğu yüksek kullanılabilir sistem sistem kapalı süresine ve veri kaybına " -"karşı yalnızca tek bir arıza olayında koruma garantiler. Ancak aynı zamanda " -"bir arızanın bir çok arızayı tetiklediği takip eden arızalara karşı " -"korumaları beklenir. Çoğu servis sağlayıcı planlı kesintiler ve sistem " -"kapalı süresi çıkarıldıktan sonra kalan kullanılabilir süreyi taban alarak " -"hesaplanan bir :term:`Servis Seviyesi Anlaşması (SLA)` ile hesaplama " -"servisinin açık kalma süresini garantiler." - -msgid "" -"Multicast groups (``mcastaddr``) must not be reused across cluster " -"boundaries. No two distinct clusters should ever use the same multicast " -"group. Be sure to select multicast addresses compliant with `RFC 2365, " -"\"Administratively Scoped IP Multicast\" `_." -msgstr "" -"Çoklu yayın grupları (``mcastaddr``) küme sınırları arasında tekrar " -"kullanılmamalıdır. Ayrı iki küme aynı çoklu yayın grubunu kullanmamalıdır. " -"`RFC 2365, \"Yönetimsel Kapsamlı IP Çoklu Yayını\" `_ ile uyumlu çoklu yayın adresleri seçtiğinizden emin olun." - -msgid "" -"MySQL databases, including MariaDB and Percona XtraDB, manage their " -"configurations using a ``my.cnf`` file, which is typically located in the ``/" -"etc`` directory. Configuration options available in these databases are also " -"available in Galera Cluster, with some restrictions and several additions." -msgstr "" -"MariaDB ve Percona XtraDB dahil MySQL veritabanları yapılandırmalarını " -"genellikle ``/etc`` dizininde bulunan bir ``my.cnf`` dosyasında yönetirler. " -"Bu veritabanlarında bulunan yapılandırma seçenekleri bazı kısıtlamalar ve " -"bir çok ek ile Galera Kümesinde de kullanılabilir." - -msgid "NIC" -msgstr "NIC" - -msgid "Network components, such as switches and routers" -msgstr "Ağ bileşenleri, örneğin anahtarlar ve yönlendiriciler" - -msgid "Networking L2 agent" -msgstr "Ağ L2 aracısı" - -msgid "No firewalls between the hosts" -msgstr "Sunucular arasında güvenlik duvarı yok" - -msgid "Node type" -msgstr "Düğüm türü" - -msgid "Note the following about the recommended interface configuration:" -msgstr "Önerilen arayüz yapılandırmasıyla ilgili şunları not edin:" - -msgid "Note the following:" -msgstr "Şunları unutmayın:" - -msgid "" -"Older versions of some distributions, which do not have an up-to-date policy " -"for securing Galera, may also require SELinux to be more relaxed about " -"database access and actions:" -msgstr "" -"Galera'yı güvenli hale getirmek için güncel ilkeleri olmayan bazı " -"dağıtımların eski sürümleri de SELinux'un veritabanı erişimi ve eylemleri " -"konusunda daha esnek olmasını bekleyebilir:" - -msgid "On CentOS, RHEL, openSUSE, and SLES:" -msgstr "CentOS, RHEL, openSUSE ve SLES üzerinde:" - -msgid "" -"On RHEL-based systems, create resources for cinder's systemd agents and " -"create constraints to enforce startup/shutdown ordering:" -msgstr "" -"RHEL tabanlı sistemlerde, cinder'in systemd aracıları için kaynaklar " -"oluşturun ve başlangıç/kapatma sıralamasını zırlayacak kısıtlar oluşturun:" - -msgid "" -"On ``3306``, Galera Cluster uses TCP for database client connections and " -"State Snapshot Transfers methods that require the client, (that is, " -"``mysqldump``)." 
-msgstr "" -"``3306`` üzerinde, Galera Kümesi veritabanı istemci bağlantıları ve " -"istemciyi gerektiren Durum Anlık Görüntü Aktarımları için TCP kullanır, " -"(yani ``mysqldump``)." - -msgid "" -"On ``4444``, Galera Cluster uses TCP for all other State Snapshot Transfer " -"methods." -msgstr "" -"``4444`` üzerinde, Galera Kümesi diğer tüm Durum Anlık Görüntü Aktarım " -"yöntemleri için TCP kullanır." - -msgid "" -"On ``4567``, Galera Cluster uses TCP for replication traffic. Multicast " -"replication uses both TCP and UDP on this port." -msgstr "" -"``4567`` üzerinde, Galera Kümesi çoğaltma trafiği için TCP kullanır. Çoklu " -"yayın çoğaltma bu bağlantı noktası üzerinde hem TCP hem UDP kullanır." - -msgid "On ``4568``, Galera Cluster uses TCP for Incremental State Transfers." -msgstr "" -"``4568`` üzerinde, Galera Kümesi Artan Durum Aktarımları için TCP kullanır." - -msgid "" -"On any host that is meant to be part of a Pacemaker cluster, establish " -"cluster communications through the Corosync messaging layer. This involves " -"installing the following packages (and their dependencies, which your " -"package manager usually installs automatically):" -msgstr "" -"Bir Pacemaker kümesinin parçası olacak herhangi bir sunucu üzerinde, küme " -"iletişimini Corosync iletileme katmanı ile sağlayın. Bu aşağıdaki paketlerin " -"kurulumunu içerir (ve bağımlılıklarını, paket yöneticiniz genellikle " -"otomatik olarak kurar):" - -msgid "" -"On each target node, verify the correct owner, group, and permissions of the " -"file :file:`erlang.cookie`:" -msgstr "" -"Her bir hedef düğümde, :file:`erlang.cookie` dosyasının doğru sahip, grup ve " -"izinleri olduğunu doğrulayın:" - -msgid "" -"On the infrastructure layer, the SLA is the time for which RabbitMQ cluster " -"reassembles. Several cases are possible. The Mnesia keeper node is the " -"master of the corresponding Pacemaker resource for RabbitMQ. When it fails, " -"the result is a full AMQP cluster downtime interval. Normally, its SLA is no " -"more than several minutes. Failure of another node that is a slave of the " -"corresponding Pacemaker resource for RabbitMQ results in no AMQP cluster " -"downtime at all." -msgstr "" -"Altyapı katmanında, SLA RabbitMQ kümesinin yeniden bir araya gelme " -"süresidir. Bir çok durum mümkündür. Mnesia tutucu düğümü RabbitMQ için " -"ilişkili Pacemaker kaynağının anasıdır. Başarısız olduğunda, sonuç tüm AMQP " -"kümesinin kapalı süre aralığıdır. Normalde SLA'sı bir kaç dakikadan fazla " -"değildir. RabbitMQ için ilişkili Pacemaker kaynağının kölesi olan başka bir " -"düğümün arızalanması herhangi bir AMQP kümesinin kapalı kalmasına sebep " -"olmaz." - -msgid "" -"Once completed, commit your configuration changes by entering :command:" -"`commit` from the :command:`crm configure` menu. Pacemaker then starts the " -"Block Storage API service and its dependent resources on one of your nodes." -msgstr "" -"Tamamlandığında, yapılandırma değişikliklerinizi :command:`crm configure` " -"menüsünden :command:`commit` girerek kaydedin. Pacemaker ardından Blok " -"Depolama API servisini ve bağımlı kaynaklarını düğümlerinizden birinde " -"başlatır." - -msgid "" -"Once created, synchronize the :file:`corosync.conf` file (and the :file:" -"`authkey` file if the secauth option is enabled) across all cluster nodes." -msgstr "" -"Oluşturulduktan sonra, :file:`corosync.conf` dosyasını tüm küme düğümlerinde " -"eşitleyin (secauth seçeneği etkinse :file:`authkey` dosyasını da)." 
- -msgid "" -"Once the database server starts, check the cluster status using the " -"``wsrep_cluster_size`` status variable. From the database client, run the " -"following command:" -msgstr "" -"Veritabanı başladıktan sonra ``wsrep_cluster_size`` durum değişkenini " -"kullanarak küme durumunu kontrol edin. Veritabanı istemcisinden şu komutları " -"çalıştırın:" - -msgid "" -"One physical server can support multiple nodes, each of which supports " -"almost any number of network interfaces." -msgstr "" -"Bir fiziksel sunucu her biri istenen sayıda ağ arayüzüne sahip birden çok " -"düğümü destekleyebilir." - -msgid "" -"Only one instance for the central and compute agent service(s) is able to " -"run and function correctly if the ``backend_url`` option is not set." -msgstr "" -"``backend_url`` seçeneği ayarlı değilse merkezi ve hesaplama aracı " -"servis(ler)i için yalnızca tek bir sunucu çalışıp düzgün işleyebilir." - -msgid "" -"OpenStack APIs: APIs that are HTTP(s) stateless services written in python, " -"easy to duplicate and mostly easy to load balance." -msgstr "" -"OpenStack API'leri: Python ile yazılmış HTTP(s) durumsuz servisler olan " -"API'ler, çoğaltması ve yük dengelemesi kolaydır." - -msgid "OpenStack Block Storage" -msgstr "OpenStack Blok Depolama" - -msgid "OpenStack Compute" -msgstr "OpenStack Hesaplama" - -msgid "OpenStack High Availability Guide" -msgstr "OpenStack Yüksek Kullanılırlık Kılavuzu" - -msgid "OpenStack Networking" -msgstr "OpenStack Ağı" - -msgid "" -"OpenStack currently meets such availability requirements for its own " -"infrastructure services, meaning that an uptime of 99.99% is feasible for " -"the OpenStack infrastructure proper. However, OpenStack does not guarantee " -"99.99% availability for individual guest instances." -msgstr "" -"OpenStack şu anda bu kullanılırlık gereksinimlerini kendi alt yapı " -"servislerinde sağlayabiliyor, yani düzgün bir OpenStack alt yapısında 99.99% " -"çalışma zamanı elde edilebilir. Ancak OpenStack bağımsız misafir sunucular " -"için 99.99% kullanılırlık garanti etmez." - -msgid "" -"OpenStack does not require a significant amount of resources and the " -"following minimum requirements should support a proof-of-concept high " -"availability environment with core services and several instances:" -msgstr "" -"OpenStack kayda değer miktarda kaynak gerektirmez ve aşağıdaki asgari " -"gereklilikler kavram ispatı niteliğinde çekirdek servisler ve bir çok " -"sunucuya sahip yüksek kullanılabilir bir ortamı destekler:" - -msgid "" -"OpenStack is a set of services exposed to the end users as HTTP(s) APIs. " -"Additionally, for your own internal usage, OpenStack requires an SQL " -"database server and AMQP broker. The physical servers, where all the " -"components are running, are called controllers. This modular OpenStack " -"architecture allows you to duplicate all the components and run them on " -"different controllers. By making all the components redundant, it is " -"possible to make OpenStack highly available." -msgstr "" -"OpenStack son kullanıcılara HTTP(s) API'leri olarak ortaya çıkan servisler " -"kümesidir. Ek olarak, dahili kullanımınız için, OpenStack bir SQL veritabanı " -"sunucusu ve AMQP aracısı gerektirir. Tüm bileşenlerin çalıştığı fiziksel " -"sunuculara denetleyiciler denir. Bu modüler OpenStack mimarisi tüm " -"bileşnleri çoğaltabilmenize farklı denetleyiciler üzerinde " -"çalıştırabilmenize izin verir. Tüm bileşenleri yedekli yaparak, OpenStack'i " -"yüksek kullanılır hale getirmek mümkündür." 
- -msgid "OpenStack network nodes contain:" -msgstr "OpenStack ağ düğümü şunu içerir:" - -msgid "" -"OpenStack services are configured with the list of these IP addresses so " -"they can select one of the addresses from those available." -msgstr "" -"OpenStack servisleri bu IP adreslerinin listesiyle yapılandırılır yani bu " -"kullanılabilir olanlardan bir adresi seçebilirler." - -msgid "" -"OpenStack supports a single-controller high availability mode that is " -"managed by the services that manage highly available environments but is not " -"actually highly available because no redundant controllers are configured to " -"use for failover. This environment can be used for study and demonstration " -"but is not appropriate for a production environment." -msgstr "" -"OpenStack arıza durumunda yer değiştirme için yapılandırılmış yedekleme " -"denetleyicileri olmadığı için aslında yüksek kullanılabilir olmayan yüksek " -"kullanılırlık ortamlarını yönetmek için tek-denetleyicili yüksek " -"kullanılabilirlik kipini destekler. Bu ortam çalışma ve gösterim amaçlı " -"kullanılabilir ama üretim ortamları için uygun değildir." - -msgid "Overview of highly available controllers" -msgstr "Yüksek kullanılabilir denetleyicilerin genel görünümü" - -msgid "Pacemaker cluster stack" -msgstr "Pacemaker küme yığını" - -msgid "" -"Pacemaker does not inherently understand the applications it manages. " -"Instead, it relies on resource agents (RAs) that are scripts that " -"encapsulate the knowledge of how to start, stop, and check the health of " -"each application managed by the cluster." -msgstr "" -"Pacemaker yönettiği uygulamaları doğuştan anlamaz. Bunun yerine küme " -"tarafından yönetilen her bir uygulamanın başlatma, durdurma ve sağlığını " -"kontrol etme bilgisini içeren betiklerden oluşan kaynak aracılarına (RA'lar) " -"güvenir." - -msgid "" -"Pacemaker now starts the OpenStack Identity service and its dependent " -"resources on all of your nodes." -msgstr "" -"Pacemaker artık OpenStack Kimlik servisini ve tüm bağımlı kaynakları tüm " -"düğümlerinizde başlatır." - -msgid "" -"Pacemaker now starts the Shared File Systems API service and its dependent " -"resources on one of your nodes." -msgstr "" -"Pacemaker şimdi Paylaşımlı Dosya Sistemleri API servisini ve bağımlı olduğu " -"kaynakları düğümlerinizden birinde başlatır." - -msgid "" -"Pacemaker relies on the `Corosync `_ " -"messaging layer for reliable cluster communications. Corosync implements the " -"Totem single-ring ordering and membership protocol. It also provides UDP and " -"InfiniBand based messaging, quorum, and cluster membership to Pacemaker." -msgstr "" -"Pacemaker güvenilir küme iletişimi için `Corosync `_ mesajlaşma katmanına güvenir. Corosync Totem tek-halka " -"sıralama ve abonelik iletişim kuralını uygular. Ayrıca Pacemaker'e UDP ve " -"InfiniBand tabanlı mesajlaşma, yetersayı ve küme üyeliği sağlar." - -msgid "" -"Pacemaker ships with a large set of OCF agents (such as those managing MySQL " -"databases, virtual IP addresses, and RabbitMQ), but can also use any agents " -"already installed on your system and can be extended with your own (see the " -"`developer guide `_)." -msgstr "" -"Pacemaker büyük bir OCF aracı kümesiyle gelir (MySQL veritabanlarını, sanal " -"IP adreslerini ve RabbitMQ'yi yönetenler gibi), ama sisteminizde kurulu " -"herhangi bir aracıyı da kullanabilir ve kendi aracılarınızla " -"genişletilebilir (bknz `geliştirici kılavuzu `_)." 
- -msgid "" -"Pacemaker then starts the OpenStack Image API service and its dependent " -"resources on one of your nodes." -msgstr "" -"Pacemaker ardından OpenStack İmaj API servisini ve bağımlı olduğu kaynakları " -"düğümlerinizden birinde başlatır." - -msgid "" -"Pacemaker uses an event-driven approach to cluster state processing. The " -"``cluster-recheck-interval`` parameter (which defaults to 15 minutes) " -"defines the interval at which certain Pacemaker actions occur. It is usually " -"prudent to reduce this to a shorter interval, such as 5 or 3 minutes." -msgstr "" -"Pacemaker küme durumu işlemede olay güdümlü bir yaklaşım kullanır. ``cluster-" -"recheck-interval`` (öntanımlı olarak 15 dakikadır) parametresi belli " -"Pacemaker eylemlerinin oluş aralığını tanımlar. Bunu 5 veya 3 dakikalık daha " -"kısa bir aralığa kısaltmak mantıklıdır." - -msgid "Parameter" -msgstr "Parametre" - -msgid "" -"Persistent block storage can survive instance termination and can also be " -"moved across instances like any external storage device. Cinder also has " -"volume snapshots capability for backing up the volumes." -msgstr "" -"Kalıcı blok depolama sunucu sonlandırmasından kurtulabilir ve ayrıca " -"herhangi bir harici depolama sürücüsü gibi sunucular arasında taşınabilir. " -"Cinder ayrıca birimleri yedeklemek için birim anlık görüntü yeteneğine " -"sahiptir." - -msgid "" -"Persistent storage exists outside all instances. Two types of persistent " -"storage are provided:" -msgstr "" -"Kalıcı depolama tüm sunucuların dışında mevcuttur. İki tür kalıcı depolama " -"sağlanmıştır:" - -msgid "Possible options are:" -msgstr "Muhtemel seçenekler:" - -msgid "Prerequisites" -msgstr "Giriş koşulları" - -msgid "Processor Cores" -msgstr "İşlemci Çekirdekleri" - -msgid "" -"Production servers should run (at least) three RabbitMQ servers for testing " -"and demonstration purposes, however it is possible to run only two servers. " -"In this section, we configure two nodes, called ``rabbit1`` and ``rabbit2``. " -"To build a broker, ensure that all nodes have the same Erlang cookie file." -msgstr "" -"Üretim sunucuları deneme ve gösterim amaçlı (en az) üç RabbitMQ sunucusu " -"çalıştırmalıdır, ancak iki sunucu çalıştırmak da mümkündür. Bu kısımda, " -"``rabbit1`` ve ``rabbit2`` isimli iki düğüm yapılandırıyoruz. Bir aracı inşa " -"etmek için tüm düğümlerin aynı Erlang çerez dosyasına sahip olduğundan emin " -"olun." - -msgid "Proxy server" -msgstr "Vekil sunucu" - -msgid "Query the quorum status" -msgstr "Yetersayı durumunu sorgula" - -msgid "" -"Quorum becomes important when a failure causes the cluster to split in two " -"or more partitions. In this situation, you want the majority members of the " -"system to ensure the minority are truly dead (through fencing) and continue " -"to host resources. For a two-node cluster, no side has the majority and you " -"can end up in a situation where both sides fence each other, or both sides " -"are running the same services. This can lead to data corruption." -msgstr "" -"Yetersayı bir arızanın kümenin iki ya da daha fazla parçaya bölünmesine " -"sebep olduğu arıza durumlarında önemli hale gelir. Bu durumda, sistem " -"üyelerinin çoğunluğunun azınlığın gerçekten ölü (parmaklıklarla) olduğundan " -"emin olarak kaynakları sunmayı devam ettirmesini istersiniz. İki düğümlü bir " -"kümede, hiçbir taraf çoğunluğu oluşturmaz, iki tarafın da birbirini " -"yalıttığı ya da iki tarafın da aynı servisleri çalıştırdığı bir durumla " -"karşılaşabilirsiniz. 
Bu veri bozukluğuna yol açabilir." - -msgid "RAID drives" -msgstr "RAID sürücüler" - -msgid "RabbitMQ" -msgstr "RabbitMQ" - -msgid "" -"RabbitMQ HA cluster Transport URL using ``[user:pass@]host:port`` format:" -msgstr "" -"``[kullanıcı:parola@]sunucu:bağlantınoktası`` biçimini kullanan RabbitMQ HA " -"küme Aktarım URL'si:" - -msgid "" -"RabbitMQ nodes fail over on the application and the infrastructure layers." -msgstr "" -"RabbitMQ düğümleri uygulama ve altyapı katmanlarında arıza giderme yapar." - -msgid "Receive notifications of quorum state changes" -msgstr "Yetersayı durum değişikliklerinde bildirileri al" - -msgid "Recommended for testing." -msgstr "Deneme amaçlı önerilir." - -msgid "Recommended solution by the Tooz project." -msgstr "Tooz projesi tarafından önerilen çözüm." - -msgid "Red Hat" -msgstr "Red Hat" - -msgid "Redundancy and failover" -msgstr "Yedeklilik ve arıza ele alma" - -msgid "" -"Regardless of which flavor you choose, we recommend that clusters contain at " -"least three nodes so that you can take advantage of `quorum `_." -msgstr "" -"Hangi niteliği seçtiğinizden bağımsız olarak, `yetersayıdan `_ " -"faydalanabilmek için kümelerin en az üç düğüm içermesini öneriyoruz." - -msgid "" -"Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage " -"database." -msgstr "" -"``CINDER_DBPASS`` anahtarını Blok Depolama veritabanı için seçtiğiniz " -"parolayla değiştirin." - -msgid "" -"Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage " -"database. Replace ``CINDER_PASS`` with the password you chose for the " -"``cinder`` user in the Identity service." -msgstr "" -"``CINDER_DBPASS`` anahtarını Blok Depolama veritabanı için seçtiğiniz " -"parolayla değiştirin. ``CINDER_PASS`` anahtarını Kimlik servisinde " -"``cinder`` kullanıcısı için seçtiğiniz parola ile değiştirin." - -msgid "" -"Replace the IP addresses given here with comma-separated list of each " -"OpenStack database in your cluster." -msgstr "" -"Burada verilen IP adreslerini kümenizdeki her bir OpenStack veritabanının " -"virgülle ayrılmış listesiyle değiştirin." - -msgid "" -"Restart AppArmor. For servers that use ``init``, run the following command:" -msgstr "" -"AppArmor'u yeniden başlatın. ``init`` kullanan sunucularda şu komutu " -"çalıştırın:" - -msgid "Restart the HAProxy service." -msgstr "HAProxy servisini yeniden başlatın." 
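Restarting HAProxy is a single command; as a sketch, the first line applies to systemd-based systems and the second to older ``init``-based systems:

.. code-block:: console

   # systemctl restart haproxy
   # service haproxy restart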
- -msgid "Restart the host or, to make changes work immediately, invoke:" -msgstr "" -"Değişikliklerin hemen etkin olması için sunucuyu yeniden başlatın, ya da " -"şunu çalıştırın:" - -msgid "Restarting the cluster" -msgstr "Kümenin yenidene başlatılması" - -msgid "Retry connecting with RabbitMQ:" -msgstr "RabbitMQ ile bağlanmayı yeniden dene:" - -msgid "Run Networking DHCP agent" -msgstr "Ağ DHCP aracısını çalıştır" - -msgid "Run Networking L3 agent" -msgstr "Ağ L3 aracısı çalıştır" - -msgid "Run the following commands on each node except the first one:" -msgstr "İlk düğüm hariç geri kalan düğümlerde aşağıdaki komutları çalıştırın:" - -msgid "" -"Run the following commands to download the OpenStack Identity resource to " -"Pacemaker:" -msgstr "" -"OpenStack Kimlik kaynağını Pacemaker'e indirmek için şu komutları çalıştırın:" - -msgid "SELinux" -msgstr "SELinux" - -msgid "SELinux and AppArmor set to permit access to ``mysqld``" -msgstr "SELinux ve AppArmor ``mysqld``ye izin verecek şekilde ayarlanmış" - -msgid "SUSE" -msgstr "SUSE" - -msgid "" -"SUSE Enterprise Linux and SUSE-based distributions, such as openSUSE, use a " -"set of OCF agents for controlling OpenStack services." -msgstr "" -"SUSE Kurumsal Linux ve SUSE tabanlı dağıtımlar, örneğin openSUSE, OpenStack " -"servislerini kontrol etmek için bir takım OCF aracıları kullanır." - -msgid "" -"Security-Enhanced Linux is a kernel module for improving security on Linux " -"operating systems. It is commonly enabled and configured by default on Red " -"Hat-based distributions. In the context of Galera Cluster, systems with " -"SELinux may block the database service, keep it from starting, or prevent it " -"from establishing network connections with the cluster." -msgstr "" -"Gelişmiş-Güvenlikli Linux Linux işletim sistemlerinde güvenliği artıran bir " -"çekirdek modülüdür. Genellikle etkindir ve Red Hat tabanlı dağıtımlarda " -"öntanımlı olarak yapılandırılır. Galera Kümesi kapsamında, SELinux'a sahip " -"sistemler veritabanı servisini engelleyerek başlamasını ya da küme ile ağ " -"bağlantısı kurmasını önleyebilirler." - -msgid "Segregated" -msgstr "Ayrılmış" - -msgid "" -"Services like RabbitMQ and Galera have complicated boot-up sequences that " -"require co-ordination, and often serialization, of startup operations across " -"all machines in the cluster. This is especially true after a site-wide " -"failure or shutdown where you must first determine the last machine to be " -"active." -msgstr "" -"RabbitMQ ve Galera gibi servislerin genellikle kümedeki tüm makineler " -"arasında başlangıç koordinasyonu ve yaygın olarak serileştirme gerektiren " -"karmaşık önyükleme düzenleri bulunur. Bu özellikle ilk önce etkin olacak son " -"makineye karar vermenizi gerektiren site genelinde bir arıza ya da kapatma " -"durumunda doğrudur." 
- -msgid "Set a password for hacluster user on each host:" -msgstr "Hacluster kullanıcısı için her bir sunucuda parola ayarlayın:" - -msgid "Set automatic L3 agent failover for routers" -msgstr "Yönlendiriciler için otomatik L3 aracısı kurtarmayı ayarla" - -msgid "Set basic cluster properties" -msgstr "Temel küme özelliklerini ayarlayın" - -msgid "Set up Corosync with multicast" -msgstr "Corosync'i çoklu yayın ile ayarla" - -msgid "Set up Corosync with unicast" -msgstr "Corosync'i tekli yayın ile ayarla" - -msgid "Set up Corosync with votequorum library" -msgstr "Corosync'i votequorum kitaplığıyla ayarla" - -msgid "Set up the cluster with `crmsh`" -msgstr "Kümeyi `crmsh` ile ayarla" - -msgid "Set up the cluster with pcs" -msgstr "Kümeyi pcs ile ayarla" - -msgid "" -"Setting ``last_man_standing`` to 1 enables the Last Man Standing (LMS) " -"feature. By default, it is disabled (set to 0). If a cluster is on the " -"quorum edge (``expected_votes:`` set to 7; ``online nodes:`` set to 4) for " -"longer than the time specified for the ``last_man_standing_window`` " -"parameter, the cluster can recalculate quorum and continue operating even if " -"the next node will be lost. This logic is repeated until the number of " -"online nodes in the cluster reaches 2. In order to allow the cluster to step " -"down from 2 members to only 1, the ``auto_tie_breaker`` parameter needs to " -"be set. We do not recommended this for production environments." -msgstr "" -"``last_man_standing`` anahtarını 1 olarak ayarlamak Ayakta Kalan Son Adam " -"(LMS) özelliğini etkinleştirir. Öntanımlı olarak kapalıdır (0 olarak " -"ayarlıdır). Bir küme ``last_man_standing_window`` parametresinden daha uzun " -"süre yetersayı sınırındaysa (``expected_votes:`` 7; ``online nodes:`` 4 " -"olarak ayarlıysa) küme yetersayıyı tekrar hesaplayıp sonraki düğüm kaybolsa " -"bile işlemeye devam edebilir. Bu mantık kümedeki çevrimiçi düğüm sayısı 2'yi " -"bulana kadar devam eder. Kümenin 2 üyeden 1 üyeye düşebilmesi için " -"``auto_tie_breaker`` parametresinin ayarlanması gerekir. Bunu üretim " -"ortamları için önermiyoruz." - -msgid "" -"Setting the ``pe-warn-series-max``, ``pe-input-series-max``, and ``pe-error-" -"series-max`` parameters to 1000 instructs Pacemaker to keep a longer history " -"of the inputs processed and errors and warnings generated by its Policy " -"Engine. This history is useful if you need to troubleshoot the cluster." -msgstr "" -"``pe-warn-series-max``, ``pe-input-series-max``, ve ``pe-error-series-max`` " -"parametrelerini 1000 olarak ayarlamak Pacemaker'e işlenen girdilerin ve İlke " -"Motoru tarafından üretilen hata ve uyarıların daha uzun bir geçmişini " -"tutmasını söyler. Bu geçmiş kümede arıza tesbiti yapmak isterseniz " -"faydalıdır." - -msgid "Simplified process for adding/removing of nodes" -msgstr "Düğümlerin eklenip/çıkarılması için basitleştirilmiş süreç" - -msgid "" -"Since all API access is directed to the proxy, adding or removing nodes has " -"no impact on the configuration of other services. This can be very useful in " -"upgrade scenarios where an entirely new set of machines can be configured " -"and tested in isolation before telling the proxy to direct traffic there " -"instead." -msgstr "" -"Tüm API erişimi vekile yönlendirildiğinden, düğüm ekleme ve çıkarmanın diğer " -"servis yapılandırmaları üzerinde etkisi yoktur. 
Bu tamamen yeni bir makine " -"kümesinin yalıtılarak yapılandırılıp denendiği ve ardından vekile trafik " -"yönlendirmesinin söylendiği güncelleme senaryolarında oldukça faydalıdır." - -msgid "" -"Since the cluster is a single administrative domain, it is acceptable to use " -"the same password on all nodes." -msgstr "" -"Küme tek bir yönetim alanında olduğundan, tüm düğümlerde aynı parolayı " -"kullanmak kabul edilebilir." - -msgid "Single-controller high availability mode" -msgstr "Tek-denetleyicili yüksek kullanılabilirlik kipi" - -msgid "" -"Specifying ``corosync_votequorum`` enables the votequorum library. This is " -"the only required option." -msgstr "" -"``corosync_votequorum`` votequorum kitaplığını etkinleştirir. Bu gerekli tek " -"seçenektir." - -msgid "Start Corosync" -msgstr "Corosync'i Başlat" - -msgid "Start Pacemaker" -msgstr "Pacemaker'i Başlat" - -msgid "Start ``corosync`` with systemd unit file:" -msgstr "``corosync``i systemd birim dosyasıyla başlat:" - -msgid "Start ``corosync`` with the LSB init script:" -msgstr "``corosync`` i LSB başlangıç betiğiyle başlat:" - -msgid "Start ``corosync`` with upstart:" -msgstr "``corosync`` i upstart ile başlat:" - -msgid "Start ``pacemaker`` with the LSB init script:" -msgstr "``pacemaker``i LSB başlangıç betiğiyle başlatın:" - -msgid "Start ``pacemaker`` with the systemd unit file:" -msgstr "``pacemaker``i systemd birim dosyasıyla başlatın:" - -msgid "Start ``pacemaker`` with upstart:" -msgstr "``pacemaker``i upstart ile başlatın:" - -msgid "" -"Start the ``xinetd`` daemon for ``clustercheck``. For servers that use " -"``init``, run the following commands:" -msgstr "" -"``clustercheck`` için ``xinetd`` artalan işini başlatın. ``init`` kullanan " -"sunucular için aşağıdaki komutları çalıştırın:" - -msgid "" -"Start the database server on all other cluster nodes. For servers that use " -"``init``, run the following command:" -msgstr "" -"Dİğer tüm küme düğümlerinde veritabanı sunucusunu başlatın. ``init`` " -"kullanan sunucularda şu komutu çalıştırın:" - -msgid "" -"Start the message queue service on all nodes and configure it to start when " -"the system boots. On Ubuntu, it is configured by default." -msgstr "" -"Tüm düğümlerde ileti kuyruğu servisini başlatın ve sistem önyüklenirken " -"başlayacak şekilde yapılandırın. Ubuntu üzerinde öntanımlı olarak " -"yapılandırılır." - -msgid "Stateful service" -msgstr "Durumsal servis" - -msgid "" -"Stateful services can be configured as active/passive or active/active, " -"which are defined as follows:" -msgstr "" -"Durumsal servisler etkin/pasif veya etkin/etkin olarak " -"yapılandırılabilirler, şu şekilde tanımlayabiliriz:" - -msgid "Stateless service" -msgstr "Durumsuz servis" - -msgid "Stateless versus stateful services" -msgstr "Durumsuz karşısında durumsal servisler" - -msgid "" -"Stop RabbitMQ and copy the cookie from the first node to each of the other " -"node(s):" -msgstr "RabbitMQ'yu durdurun ve çerezi ilk düğümden diğer düğüm(ler)e dağıtın:" - -msgid "Storage" -msgstr "Depolama" - -msgid "Storage back end" -msgstr "Depolama arka ucu" - -msgid "Storage components" -msgstr "Depolama bileşenleri" - -msgid "" -"System downtime: Occurs when a user-facing service is unavailable beyond a " -"specified maximum amount of time." -msgstr "" -"Sistem kapalı süresi: Kullanıcı taraflı bir servisin belirli bir azami " -"süreden daha fazla kullanılamaz olduğu durumlardır." 
- -msgid "Telemetry" -msgstr "Telemetri" - -msgid "Telemetry polling agent" -msgstr "Telemetri yoklama aracısı" - -msgid "" -"The :command:`crm configure` command supports batch input. Copy and paste " -"the lines in the next step into your live Pacemaker configuration and then " -"make changes as required." -msgstr "" -":command:`crm configure` komutu çoklu girdi destekler. Sonraki adımdaki " -"satırları kopyalayın ve canlı Pacemaker yapılandırmanıza yapıştırarak " -"gerekli değişiklikleri yapın." - -msgid "" -"The :command:`crm configure` supports batch input. Copy and paste the lines " -"in the next step into your live Pacemaker configuration and then make " -"changes as required." -msgstr "" -":command:`crm configure` çoklu girdiyi destekler. Sonraki adımdaki satırları " -"kopyalayın ve canlı Pacemaker yapılandırmanıza yapıştırın ardından gerekli " -"değişiklikleri yapın." - -msgid "" -"The :command:`crm configure` supports batch input. You may have to copy and " -"paste the above lines into your live Pacemaker configuration, and then make " -"changes as required." -msgstr "" -":command:`crm configure` toplu giriş destekler. Yukarıdaki satırları canlı " -"Pacemaker yapılandırmanıza kopyalayıp yapıştırmanız, ve gerektikçe " -"değişiklikler yapmanız gerekebilir." - -msgid "" -"The Block Storage service (cinder) that can use LVM or Ceph RBD as the " -"storage back end." -msgstr "" -"Depolama arka ucu olarak LVM veya Ceph RBD kullanan Blok Depolama servisi " -"(cinder)." - -msgid "" -"The Galera cluster configuration directive ``backup`` indicates that two of " -"the three controllers are standby nodes. This ensures that only one node " -"services write requests because OpenStack support for multi-node writes is " -"not yet production-ready." -msgstr "" -"Galera kümesi yapılandırma yönergesi ``backup`` üç kontrol düğümünden " -"ikisinin bekleme düğümü olduğunu gösterir. Böylece OpenStack çoklu-düğüm " -"yazma henüz üretim için hazır olmadığından tek bir düğümün yazma isteklerini " -"karşılması sağlanır." - -msgid "" -"The Image service (glance) that can use the Object Storage service (swift) " -"or Ceph RBD as the storage back end." -msgstr "" -"Nesne Depolama servisini (swift) ya da Ceph RBD'yi depolama arka ucu olarak " -"kullanabilen İmaj servisi (glance)." - -msgid "" -"The L2 agent cannot be distributed and highly available. Instead, it must be " -"installed on each data forwarding node to control the virtual network driver " -"such as Open vSwitch or Linux Bridge. One L2 agent runs per node and " -"controls its virtual interfaces." -msgstr "" -"L2 aracısı dağıtılamaz ve yüksek kullanılabilir değildir. Bunun yerine Open " -"vSwitch veya Linux Köprüsü gibi sanal ağ sürücüsünü kontrol etmek için her " -"bir veri yönlendiren düğüm üzerinde yüklenmelidir. Düğüm başına bir L2 " -"aracısı çalışır ve sanal arayüzlerini kontrol eder." - -msgid "" -"The Memcached client implements hashing to balance objects among the " -"instances. Failure of an instance impacts only a percentage of the objects " -"and the client automatically removes it from the list of instances. The SLA " -"is several minutes." -msgstr "" -"Memcached istemcisi nesneleri sunucular arasında dengelemek için özetleme " -"uygular. Bir sunucu arızası yalnızca nesnelerin belli bir yüzdesini etkiler " -"ve istemci otomatik olarak bu sunucuyu listeden çıkarır. SLA birkaç " -"dakikadır." - -msgid "" -"The Memcached client implements hashing to balance objects among the " -"instances. 
Failure of an instance only impacts a percentage of the objects, " -"and the client automatically removes it from the list of instances." -msgstr "" -"Memcached istemcisi nesneleri sunucular arasında dengelemek için özetleme " -"uygular. Bir sunucu arızası yalnızca nesnelerin belirli bir yüzdesini " -"etkiler, ve istemci otomatik olarak sunucu listesinden bu sunucuyu çıkarır." - -msgid "" -"The Networking (neutron) service L3 agent is scalable, due to the scheduler " -"that supports Virtual Router Redundancy Protocol (VRRP) to distribute " -"virtual routers across multiple nodes. For more information about the VRRP " -"and keepalived, see `Linux bridge: High availability using VRRP `_ and " -"`Open vSwitch: High availability using VRRP `_." -msgstr "" -"Ağ (neutron) servis L3 aracısı birden çok düğüm arasında sanal " -"yönlendiriciler dağıtmak için Sanal Yönlendirici Yedeklilik İletişim Kuralı " -"(VRRP) destekleyen zamanlayıcı sebebiyle ölçeklenebilirdir. VRRP ve " -"keepalived hakkında daha fazla bilgi için `Linux köprüsü: VRRP kullanarak " -"yüksek kullanılırlık `_ ve `Open vSwitch: VRRP kullanarak yüksek " -"kullanılırlık `_ belgelerine göz atın." - -msgid "" -"The OpenStack Image service offers a service for discovering, registering, " -"and retrieving virtual machine images. To make the OpenStack Image API " -"service highly available in active/passive mode, you must:" -msgstr "" -"OpenStack İmaj servisi sanal makine imajlarını keşfetmek, kaydetmek ve almak " -"için bir servis sağlar. OpenStack İmaj API servisini etkin/pasif kipte " -"yüksek kullanılır yapmak için şunları yapmalısınız:" - -msgid "" -"The OpenStack Networking (neutron) service has a scheduler that lets you run " -"multiple agents across nodes. The DHCP agent can be natively highly " -"available." -msgstr "" -"OpenStack Ağ (neutron) servisi düğümler arasında birden fazla aracı " -"çalıştırmanızı sağlayan bir zamanlayıcıya sahiptir. DHCP aracısı yerel " -"olarak yüksek kullanılır olabilir." - -msgid "The Pacemaker architecture" -msgstr "Pacemaker mimarisi" - -msgid "" -"The Pacemaker service also requires an additional configuration file ``/etc/" -"corosync/uidgid.d/pacemaker`` to be created with the following content:" -msgstr "" -"Pacemaker servisi ayrıca aşağıdaki içeriğe sahip ek bir ``/etc/corosync/" -"uidgid.d/pacemaker`` yapılandırma dosyasına ihtiyaç duyar:" - -msgid "" -"The SQL relational database server provides stateful type consumed by other " -"components. Supported databases are MySQL, MariaDB, and PostgreSQL. Making " -"the SQL database redundant is complex." -msgstr "" -"SQL ilişkisel veritabanı sunucusu diğer bileşenler tarafından tüketilen " -"durumsal türü sağlar. Desteklenen veritabanları MySQL, MariaDB, ve " -"PostgreSQL'dir. SQL veritabanını yedekli hale getirmek karmaşıktır." - -msgid "" -"The Telemetry API service configuration does not have the ``option httpchk`` " -"directive as it cannot process this check properly." -msgstr "" -"Telemetri API servis yapılandırması bu kontrolü düzgün işleyemediğinden " -"``option httpchk`` yönergesine sahip değildir." - -msgid "" -"The Telemetry polling agent can be configured to partition its polling " -"workload between multiple agents. This enables high availability (HA)." -msgstr "" -"Telemetri yoklama aracısı yoklama iş yükünü birçok aracı arasında dağıtacak " -"şekilde yapılandırılabilir. Bu yüksek kullanılırlığı etkinleştirir (HA)." - -msgid "" -"The `Telemetry service `_ provides a data collection service and an alarming " -"service." 
-msgstr "" -"`Telemetri servisi `_ veri toplama servisi ve uyarı servisi sağlar." - -msgid "" -"The ``-p`` option is used to give the password on command line and makes it " -"easier to script." -msgstr "" -"``-p`` seçeneği komut satırından parola vermek için kullanılır ve betiklerde " -"kolaylık sağlar." - -msgid "" -"The ``admin_bind_host`` parameter lets you use a private network for admin " -"access." -msgstr "" -"``admin_bind_host`` parametresi yönetici erişimi için özel bir ağ " -"kullanmanızı sağlar." - -msgid "" -"The ``bindnetaddr`` is the network address of the interfaces to bind to. The " -"example uses two network addresses of /24 IPv4 subnets." -msgstr "" -"``bindnetaddr`` bağlanılacak arayüzün ağ adresidir. Örnek /24 IPv4 alt " -"ağlarının iki ağ adresini kullanır." - -msgid "" -"The ``token`` value specifies the time, in milliseconds, during which the " -"Corosync token is expected to be transmitted around the ring. When this " -"timeout expires, the token is declared lost, and after " -"``token_retransmits_before_loss_const lost`` tokens, the non-responding " -"processor (cluster node) is declared dead. ``token × " -"token_retransmits_before_loss_const`` is the maximum time a node is allowed " -"to not respond to cluster messages before being considered dead. The default " -"for token is 1000 milliseconds (1 second), with 4 allowed retransmits. These " -"defaults are intended to minimize failover times, but can cause frequent " -"false alarms and unintended failovers in case of short network " -"interruptions. The values used here are safer, albeit with slightly extended " -"failover times." -msgstr "" -"``token`` değeri Corosync jetonunun halka etrafında aktarım zamanını " -"milisaniye türünden belirtir. Bu zaman aşıldığında, jeton kayıp olarak ilan " -"edilir, ve ``token_retransmits_before_loss_const lost`` jeton sonra, yanıt " -"vermeyen işletic (küme düğümü) ölü olarak ilan edilir. Bir düğümün küme " -"iletilerine ölü olarak işaretlenmeden önce cevap vermeyebileceği azami süre " -"``token × token_retransmits_before_loss_const`` kadardır. Jeton için " -"öntanımlı değer 4 tekrar gönderime izin verilecek şekilde 1000 milisaniyedir " -"(1 saniye). Bu öntanımlılar arıza sürelerini asgariye indirmek için " -"düşünülmüştür, ama kısa ağ kesintileri olduğunda sık sık yanlış alarm " -"verebilir ve istenmeyen arıza bildirimlerine neden olabilir. Burada " -"kullanılan değerler daha güvenli olsa da biraz daha uzun arıza sürelerine " -"sahiptir." - -msgid "" -"The ``transport`` directive controls the transport mechanism. To avoid the " -"use of multicast entirely, specify the ``udpu`` unicast transport parameter. " -"This requires specifying the list of members in the ``nodelist`` directive. " -"This potentially makes up the membership before deployment. The default is " -"``udp``. The transport type can also be set to ``udpu`` or ``iba``." -msgstr "" -"``transport`` yönergesi aktarım yöntemini kontrol eder. Çoklu yayın " -"kullanmaktan tamamen kaçınmak için, ``udpu`` tekli yayın aktarım " -"parametresini belirtin. Bu, üye listesini ``nodelist`` yönergesinde " -"belirtmeyi gerektirir. Bunun kurulumdan önce üyeliği yapma potansiyeli " -"vardır. Öntanımlı değer ``udp`` dir. Aktarım türü ayrıca ``udpu`` veya " -"``iba`` olabilir." - -msgid "" -"The application layer is controlled by the ``oslo.messaging`` configuration " -"options for multiple AMQP hosts. If the AMQP node fails, the application " -"reconnects to the next one configured within the specified reconnect " -"interval. 
The specified reconnect interval constitutes its SLA." -msgstr "" -"Uygulama katmanı birçok AMQP sunucusu için ``oslo.messaging`` yapılandırma " -"seçeneği tarafından kontrol edilir. AMQP düğümü başarısız olursa, uygulama " -"belirtilen yeniden bağlanma aralığı dahilinde yapılandırılan bir sonrakine " -"bağlanır. Belirtilen yeniden bağlanma aralığı SLA'sını teşkil eder." - -msgid "" -"The architectural challenges of instance HA and several currently existing " -"solutions were presented in `a talk at the Austin summit `_, for which `slides are also available `_." -msgstr "" -"Sunucu yüksek kullanılırlığının mimari zorlukları ve mevcut çeşitli çözümler " -"`Austin zirvesindeki bir konuşmada `_ " -"sunulmuştur, `slaytlar da mevcuttur `_." - -msgid "" -"The architectures differ in the sets of services managed by the cluster." -msgstr "" -"Mimariler küme tarafından yönetilen servisler kümesinde değişiklik gösterir." - -msgid "" -"The availability check of the instances is provided by heartbeat messages. " -"When the connection with an instance is lost, the workload will be " -"reassigned within the remaining instances in the next polling cycle." -msgstr "" -"Sunucuların kullanılırlık kontrolleri kalp atışı iletileri ile sağlanır. Bir " -"sunucu ile olan bağlantı kaybolduğunda, iş yükü sonraki yoklama döngüsüyle " -"beraber kalan sunuculara yeniden atanır." - -msgid "" -"The benefits of this approach are the physical isolation between components " -"and the ability to add capacity to specific components." -msgstr "" -"Bu yaklaşımın faydaları bileşenler arasındaki fiziksel yalıtım ve belirli " -"bileşenlere kapasite ekleyebilme becerisidir." - -msgid "" -"The cloud controller runs on the management network and must talk to all " -"other services." -msgstr "" -"Bulut denetleyici yönetim ağı üzerinde çalışır ve tüm diğer servislerle " -"konuşmalıdır." - -msgid "" -"The cluster is fully operational with ``expected_votes`` set to 7 nodes " -"(each node has 1 vote), quorum: 4. If a list of nodes is specified as " -"``nodelist``, the ``expected_votes`` value is ignored." -msgstr "" -"Küme ``expected_votes`` 7 düğüm (her düğümün 1 oyu var) olarak ayarlanmış ve " -"tamamen işlevsel, yetersayı: 4. Eğer düğüm listesi ``nodelist`` olarak " -"belirtilmişse, ``expected_votes`` değeri göz ardı edilir." - -msgid "" -"The code for three of these solutions can be found online at the following " -"links:" -msgstr "" -"Bu çözümlerden üçü için kod şu bağlantılarda çevrimiçi olarak bulunabilir:" - -msgid "" -"The command :command:`crm configure` supports batch input, copy and paste " -"the lines above into your live Pacemaker configuration and then make changes " -"as required. For example, you may enter ``edit p_ip_cinder-api`` from the :" -"command:`crm configure` menu and edit the resource to match your preferred " -"virtual IP address." -msgstr "" -":command:`crm configure` komutu toplu girişi destekler, yukarıdaki satırları " -"kopyalayıp Pacemaker yapılandırmanıza yapıştırın ve gerekli değişiklikleri " -"yapın. Örneğin :command:`crm configure` menüsünden ``edit p_ip_cinder-api`` " -"girebilir ve kaynağı tercih ettiğiniz sanal IP adresi ile eşleşecek şekilde " -"düzenleyebilirsiniz." - -msgid "" -"The commands for installing RabbitMQ are specific to the Linux distribution " -"you are using." -msgstr "" -"RabbitMQ kurmak için kullanılan komutlar kullandığınız Linux dağıtımına " -"özeldir." 
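As a rough sketch of those distribution-specific commands, the package is normally installed with the native package manager; the three lines below correspond to Ubuntu/Debian, RHEL/CentOS, and openSUSE/SLES respectively, and assume the default ``rabbitmq-server`` package name:

.. code-block:: console

   # apt-get install rabbitmq-server
   # yum install rabbitmq-server
   # zypper install rabbitmq-server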
- -msgid "" -"The correct path to ``libgalera_smm.so`` given to the ``wsrep_provider`` " -"parameter" -msgstr "" -"``wsrep_provider`` parametresine verilmiş ``libgalera_smm.so`` dosyasının " -"doğru yolu" - -msgid "" -"The first step is to install the database that sits at the heart of the " -"cluster. To implement high availability, run an instance of the database on " -"each controller node and use Galera Cluster to provide replication between " -"them. Galera Cluster is a synchronous multi-master database cluster, based " -"on MySQL and the InnoDB storage engine. It is a high-availability service " -"that provides high system uptime, no data loss, and scalability for growth." -msgstr "" -"İlk adım kümenin kalbinde olan veritabanının kurulumudur. Yüksek " -"kullanılırlık uygulamak için her bir kontrol düğümünde veritabanı sunucusu " -"çalıştırın ve aralarında yedekliliği sağlamak için Galera Kümesini kullanın. " -"Galera Kümesi MySQL ve InnoDB depolama motoru tabanlı, eşzamanlı, çoklu-ana " -"sunuculu veritabanı kümesidir. Yüksek sistem çalışma süresi, kayıpsız veri " -"ve büyüme için ölçeklenebilirlik sağlayan yüksek kullanılır bir servistir." - -msgid "The following are the definitions of stateless and stateful services:" -msgstr "Aşağıda durumsuz ve durumsal servis tanımları bulunur:" - -msgid "The following are the standard hardware requirements:" -msgstr "Aşağıdakiler standart donanım gereksinimleridir:" - -msgid "" -"The following components are currently unable to benefit from the use of a " -"proxy server:" -msgstr "Aşağıdaki bileşenler şu anda bir vekil sunucudan faydalanamıyorlar:" - -msgid "The following components/services can work with HA queues:" -msgstr "Aşağıdaki bileşenler/servisler HA kuyruklarıyla çalışabilir:" - -msgid "" -"The following section(s) detail how to add the OpenStack Identity resource " -"to Pacemaker on SUSE and Red Hat." -msgstr "" -"Aşağıdaki kısım(lar) OpenStack Kimlik kaynağını SUSE ve Red Hat üzerinde " -"nasıl Pacemaker'e ekleyeceğinizi ayrıntılar." - -msgid "" -"The majority of services, needing no real orchestration, are handled by " -"systemd on each node. This approach avoids the need to coordinate service " -"upgrades or location changes with the cluster and has the added advantage of " -"more easily scaling beyond Corosync's 16 node limit. However, it will " -"generally require the addition of an enterprise monitoring solution such as " -"Nagios or Sensu for those wanting centralized failure reporting." -msgstr "" -"Gerçek bir düzenleyiciye ihtiyaç duymayan servislerin büyük çoğunluğu her " -"bir düğümde systemd tarafından ele alınır. Bu yaklaşım küme ile servis " -"yükseltmeleri veya konum değişikliklerini eşgüdümleme ihtiyacını ortadan " -"kaldırır ve Corosync'in 16 düğüm sınırının ötesine daha kolay geçme " -"avantajınıa sahiptir. Ancak merkezi hata raporlama isteyenler için " -"genellikle Nagios veya Sensu gibi kurumsal bir izleme çözümü ihtiyacı " -"çıkarır." - -msgid "" -"The most popular AMQP implementation used in OpenStack installations is " -"RabbitMQ." -msgstr "" -"OpenStack kurulumlarında en yaygın kullanılan AMQP uygulaması RabbitMQ'dur." - -msgid "" -"The proxy can be configured as a secondary mechanism for detecting service " -"failures. It can even be configured to look for nodes in a degraded state " -"(such as being too far behind in the replication) and take them out of " -"circulation." -msgstr "" -"Vekil servis arızalarını algılamak için ikinci bir mekanizma olarak " -"yapılandırılabilir. 
Hatta artık kullanılmayan durumda olan düğümleri bulacak " -"ve döngüden çıkaracak şekilde bile yapılandırılabilir (örneğin yedeklemede " -"çok geride kalmak gibi sebeplerden)." - -msgid "" -"The quorum specifies the minimal number of nodes that must be functional in " -"a cluster of redundant nodes in order for the cluster to remain functional. " -"When one node fails and failover transfers control to other nodes, the " -"system must ensure that data and processes remain sane. To determine this, " -"the contents of the remaining nodes are compared and, if there are " -"discrepancies, a majority rules algorithm is implemented." -msgstr "" -"Yetersayı kümenin işlevsel kalabilmesi için yedekli düğümlerden oluşan bir " -"kümede bulunması gereken asgari düğüm sayısını belirtir. Bir düğüm " -"arızalandığında ve kurtarma kontrolü diğer düğümlere aktardığında, sistem " -"veri ve süreçlerin mantıklı kaldığından emin olmalıdır. Buna karar vermek " -"için kalan düğümlerin içerikleri karşılaştırılır ve çelişkiler varsa, " -"çoğunluk kuralı algoritması uygulanır." - -msgid "" -"The service declaration for the Pacemaker service may be placed in the :file:" -"`corosync.conf` file directly or in its own separate file, :file:`/etc/" -"corosync/service.d/pacemaker`." -msgstr "" -"Pacemaker servisi için servis tanımı doğrudan :file:`corosync.conf` " -"dosyasına veya kendine ait ayrı bir dosyaya konabilir, :file:`/etc/corosync/" -"service.d/pacemaker`." - -msgid "The steps to implement the Pacemaker cluster stack are:" -msgstr "Pacemaker küme yığınını uygulamak için gerekli adımlar:" - -msgid "" -"The votequorum library has been created to replace and eliminate ``qdisk``, " -"the disk-based quorum daemon for CMAN, from advanced cluster configurations." -msgstr "" -"Votequorum kitaplığı gelişmiş küme yapılandırmalarından, CMAN için disk " -"tabanlı yetersayı artalan işi olan ``qdisk``i değiştirmek veya çıkarmak için " -"oluşturuldu." - -msgid "" -"The votequorum library is part of the Corosync project. It provides an " -"interface to the vote-based quorum service and it must be explicitly enabled " -"in the Corosync configuration file. The main role of votequorum library is " -"to avoid split-brain situations, but it also provides a mechanism to:" -msgstr "" -"Votequorum kitaplığı Corosync projesinin parçasıdır. Oy tabanlı yetersayı " -"servisine bir arayüz sunar ve Corosync yapılandırma dosyasında özellikle " -"etkinleştirilmelidir. Voteqourum kitaplığının ana rolü ayrık beyin " -"durumlarını engellemektir, ama ayrıca şunlar için de bir yöntem sağlar:" - -msgid "" -"These agents must conform to one of the `OCF `_, `SysV Init " -"`_, Upstart, or Systemd standards." -msgstr "" -"Bu aracılar `OCF `_, `SysV Init `_, Upstart, " -"veya Systemd'den birine uyumludurlar." - -msgid "This can be achieved using the :command:`iptables` command:" -msgstr "Bu :command:`iptables` komutuyla elde edilebilir:" - -msgid "" -"This chapter describes the basic environment for high availability, such as " -"hardware, operating system, common services." -msgstr "" -"Bu bölüm donanım, işletim sistemi, yaygın servisler gibi yüksek " -"kullanılırlık için temel ortamı tanımlar." - -msgid "" -"This chapter describes the shared services for high availability, such as " -"database, messaging service." -msgstr "" -"Bu bölüm yüksek kullanılırlık için veritabanı, ileti servisi gibi paylaşımlı " -"servisleri tanımlar." - -msgid "" -"This configuration creates ``p_cinder-api``, a resource for managing the " -"Block Storage API service." 
-msgstr "" -"Bu yapılandırma Blok Depolama API servisini yönetmek için bir kaynak olan " -"``p_cinder-api`` oluşturur." - -msgid "" -"This configuration creates ``p_glance-api``, a resource for managing the " -"OpenStack Image API service." -msgstr "" -"Bu yapılandırma OpenStack İmaj API servisini yönetmek için bir kaynak olan " -"``p_glance-api`` oluşturur." - -msgid "" -"This configuration creates ``p_keystone``, a resource for managing the " -"OpenStack Identity service." -msgstr "" -"Bu yapılandırma OpenStack Kimlik servisini yönetmek için bir kaynak olan " -"``p_keystone``yi oluşturur." - -msgid "" -"This configuration creates ``p_manila-api``, a resource for managing the " -"Shared File Systems API service." -msgstr "" -"Bu yapılandırma Paylaşımlı Dosya Sistemleri API servisini yönetmek için bir " -"kaynak olan ``p_manila-api`` oluşturur." - -msgid "" -"This configuration creates ``vip``, a virtual IP address for use by the API " -"node (``10.0.0.11``)." -msgstr "" -"Bu yapılandırma API düğümü (``10.0.0.11``) tarafından kullanılmak üzere bir " -"sanal IP adresi ``vip`` oluşturur." - -msgid "" -"This document discusses some common methods of implementing highly available " -"systems, with an emphasis on the core OpenStack services and other open " -"source services that are closely aligned with OpenStack." -msgstr "" -"Bu belge OpenStack servisleri ve OpenStack ile ilgili diğer açık kaynak " -"servisleri ön planda tutarak bazı yaygın yüksek kullanılırlık sistemleri " -"uygulamalarını tartışır." - -msgid "" -"This example assumes that you are using NFS for the physical storage, which " -"will almost never be true in a production installation." -msgstr "" -"Bu örnek fiziksel depolama için NFS kullandığınızı varsayar, ki bu üretim " -"kurulumlarında nerdeyse hiçbir zaman doğru olmaz." - -msgid "This guide is intended as advice only." -msgstr "Bu kılavuz yalnıca öneri niteliğindedir." - -msgid "This guide uses the following example IP addresses:" -msgstr "Bu kılavuz şu örnek IP adreslerini kullanır:" - -msgid "" -"This guide was last updated as of the Ocata release, documenting the " -"OpenStack Ocata, Newton, and Mitaka releases. It may not apply to EOL " -"releases Kilo and Liberty." -msgstr "" -"Bu kılavuz en son Ocata dağıtımında güncellendi, OpenStack Ocata, Newton ve " -"Mitaka dağıtımlarını belgeler. Kilo ve Liberty gibi ömrü dolmuş dağıtımlara " -"uygulanamayabilir." - -msgid "This is the most common option and the one we document here." -msgstr "Bu burada belgelediğimiz en yaygın seçenektir." - -msgid "" -"This is why setting the quorum to a value less than ``floor(n/2) + 1`` is " -"dangerous. However it may be required for some specific cases, such as a " -"temporary measure at a point it is known with 100% certainty that the other " -"nodes are down." -msgstr "" -"Yetersayıyı ``taban(n/2) + 1`` değerinden küçük bir değere ayarlamak bu " -"yüzden tehlikelidir. Ancak bazı özel durumlarda gerekli olabilir, örneğin " -"diğer düğümlerin 100% bozuk olduğunun bilindiği geçici önlemler için." - -msgid "" -"This scenario can be visualized as below, where each box below represents a " -"cluster of three or more guests." -msgstr "" -"Bu senaryo aşağıdaki gibi görselleştirilebilir, her bir kutu üç ya da daha " -"fazla misafire sahip bir kümeyi temsil eder." - -msgid "This scenario can be visualized as below." -msgstr "Bu senaryo şu şekilde görselleştirilebilir." - -msgid "" -"This scenario has the advantage of requiring far fewer, if more powerful, " -"machines. 
Additionally, being part of a single cluster allows you to " -"accurately model the ordering dependencies between components." -msgstr "" -"Bu senaryonun çok daha az, daha güçlü makineler gerektirme avantajı vardır. " -"Ek olarak, tek bir kümenin üyesi olmak bileşenler arasındaki sıralama " -"bağımlılıklarını daha doğru modellemenizi sağlar." - -msgid "" -"This section discusses ways to protect against data loss in your OpenStack " -"environment." -msgstr "" -"Bu kısım OpenStack ortamınızda veri kaybına karşı alınabilecek yolları " -"tartışır." - -msgid "" -"This value increments with each transaction, so the most advanced node has " -"the highest sequence number and therefore is the most up to date." -msgstr "" -"Bu değer her bir aktarımla artar, yani en gelişmiş düğüm en yüksek sıra " -"numarasına sahiptir böylece en güncel olandır." - -msgid "" -"To be sure that all data is highly available, ensure that everything is " -"stored in the MySQL database (which is also highly available):" -msgstr "" -"Tüm verinin yüksek kullanılırlıklı olduğundan emin olmak için, herşeyin " -"MYSQL veritabanında depolandığından emin olun (kendisi de yüksek " -"kullanılırlıklı olmak üzere):" - -msgid "" -"To configure AppArmor to work with Galera Cluster, complete the following " -"steps on each cluster node:" -msgstr "" -"AppArmor'u Galera Kümesi ile çalışacak şekilde yapılandırmak için, her bir " -"küme düğümünde aşağıdaki adımları tamamlayın:" - -msgid "" -"To configure SELinux to permit Galera Cluster to operate, you may need to " -"use the ``semanage`` utility to open the ports it uses. For example:" -msgstr "" -"SELinux'u Galera Kümesinin işlemesine izin verecek şekilde yapılandırmak " -"için, kullandığı bağlantı noktalarını açmak için ``semanage`` aracını " -"kullanmanız gerekebilir. Örneğin:" - -msgid "" -"To configure the number of DHCP agents per network, modify the " -"``dhcp_agents_per_network`` parameter in the :file:`/etc/neutron/neutron." -"conf` file. By default this is set to 1. To achieve high availability, " -"assign more than one DHCP agent per network. For more information, see `High-" -"availability for DHCP `_." -msgstr "" -"Ağ başına DHCP aracısı sayısını yapılandırmak için :file:`/etc/neutron/" -"neutron.conf` dosyasındaki ``dhcp_agents_per_network`` parametresini " -"değiştirin. Öntanımlı olarak bu değer 1 olarak ayarlıdır. Yüksek " -"kullanılırlık elde etmek için, ağ başına birden fazla DHCP aracısı atayın. " -"Daha fazla bilgi için, `DHCP için yüksek kullanılırlık `_ belgesine göz " -"atın." - -msgid "" -"To enable high availability for configured routers, edit the :file:`/etc/" -"neutron/neutron.conf` file to set the following values:" -msgstr "" -"Yapılandırılan yönlendiriciler için yüksek kullanılırlığı etkinleştirmek " -"için, :file:`/etc/neutron/neutron.conf` dosyasını düzenleyerek aşağıdaki " -"değerleri ayarlayın:" - -msgid "" -"To enable the compute agent to run multiple instances simultaneously with " -"workload partitioning, the ``workload_partitioning`` option must be set to " -"``True`` under the `compute section `_ in the :file:`ceilometer.conf` configuration " -"file." -msgstr "" -"Hesaplama aracısının iş yükünü dağıtarak birkaç sunucuyu aynı anda " -"çalıştırmasını etkinleştirmek için, :file:`ceilometer.conf` yapılandırma " -"dosyasında `hesaplama kısmı `_ altındaki ``workload_partitioning`` seçeneği " -"``True`` olarak ayarlanmalıdır." 
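As a minimal sketch of the ``workload_partitioning`` setting described above (assuming only the stock ``[compute]`` section of :file:`ceilometer.conf`; any other coordination options are out of scope here):

.. code-block:: ini

   [compute]
   # Enable workload partitioning so several compute agents can run at once
   workload_partitioning = True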
- -msgid "" -"To ensure that all queues except those with auto-generated names are " -"mirrored across all running nodes, set the ``ha-mode`` policy key to all by " -"running the following command on one of the nodes:" -msgstr "" -"Otomatik üretilen isimlere sahip olanlar hariç tüm kuyrukların tüm çalışan " -"düğümlerde yansılandığından emin olmak için, düğümlerden biri üzerinde " -"aşağıdaki komutu çalıştırarak ``ha-mode`` ilke anahtarını hepsi olarak " -"ayarlayın:" - -msgid "" -"To find the most advanced cluster node, you need to check the sequence " -"numbers, or the ``seqnos``, on the last committed transaction for each. You " -"can find this by viewing ``grastate.dat`` file in database directory:" -msgstr "" -"En gelişmiş küme düğümünü bulmak için, her biri için son gönderilen " -"aktarımın sıra numarasını, veya ``seqno``sunu kontrol etmelisiniz. Bunu " -"veritabanı dizinindeki ``grastate.dat`` dosyasını görüntüleyerek " -"bulabilirsiniz:" - -msgid "" -"To install and configure Memcached, read the `official documentation " -"`_." -msgstr "" -"Memecached kurulum ve yapılandırması için `resmi belgelendirmeyi `_ okuyun." - -msgid "To start the cluster, complete the following steps:" -msgstr "Kümeyi başlatmak için, aşağıdaki adımları tamamlayın:" - -msgid "" -"Traditionally, Pacemaker has been positioned as an all-encompassing " -"solution. However, as OpenStack services have matured, they are increasingly " -"able to run in an active/active configuration and gracefully tolerate the " -"disappearance of the APIs on which they depend." -msgstr "" -"Geleneksel olarak, Pacemaker herşeyi kapsayan bir çözüm olarak " -"konulandırıldı. Ancak, OpenStack servisleri olgunlaştıkça, giderek artan " -"şekilde etkin/etkin yapılandırmalarda çalışmaya başladılar ve bağımlılık " -"duydukları API'lerin kaybolması durumunu daha iyi kaldırabiliyorlar." - -msgid "True" -msgstr "Doğru" - -msgid "" -"Typically, an active/active installation for a stateless service maintains a " -"redundant instance, and requests are load balanced using a virtual IP " -"address and a load balancer such as HAProxy." -msgstr "" -"Genellikle, durumsuz bir servis için etkin/etkin bir kurulum yedek bir " -"sunucuya da bakar, ve istekler bir sanal IP adresi ve HAProxy gibi bir yük " -"dengeleyici ile dengelenir." - -msgid "Use HA queues in RabbitMQ (``x-ha-policy: all``):" -msgstr "RabbitMQ'da HA kuyrukları kullan (``x-ha-policy: all``):" - -msgid "" -"Use MySQL/Galera in active/passive mode to avoid deadlocks on ``SELECT ... " -"FOR UPDATE`` type queries (used, for example, by nova and neutron). This " -"issue is discussed in the following:" -msgstr "" -"``SELECT ... FOR UPDATE`` türü sorgularda ölükilitlere yol açmamak için " -"MySQL/Galera'yı etkin/pasif kipte kullanın (örneğin nova ve neutron " -"tarafından kullanılır). 
Bu sorun aşağıda tartışılmıştır:" - -msgid "Use durable queues in RabbitMQ:" -msgstr "RabbitMQ'da dayanıklı kuyruklar kullan:" - -msgid "" -"Use that password to authenticate to the nodes that will make up the cluster:" -msgstr "Kümeyi oluşturan düğümlere kimlik doğrulama için bu parolayı kullan:" - -msgid "" -"Use the :command:`corosync-cfgtool` utility with the ``-s`` option to get a " -"summary of the health of the communication rings:" -msgstr "" -"İletişim halkalarının sağlık özetini almak için :command:`corosync-cfgtool` " -"aracını ``-s`` seçeneğiyle kullanın:" - -msgid "" -"Use the :command:`corosync-objctl` utility to dump the Corosync cluster " -"member list:" -msgstr "" -"Corosync kümesi üye listesini dökmek için :command:`corosync-objctl` aracını " -"kullanın:" - -msgid "Use these steps to configurate all services using RabbitMQ:" -msgstr "" -"Tüm servisleri RabbitMQ kullanarak yapılandırmak için bu adımları kullanın:" - -msgid "Value" -msgstr "Değer" - -msgid "Verify that the nodes are running:" -msgstr "Düğümlerin çalıştığını doğrulayın:" - -msgid "Verify the cluster status:" -msgstr "Küme durumunu doğrula:" - -msgid "Virtualized hardware" -msgstr "Sanallaştırılmış donanım" - -msgid "" -"We advise that you read this at your own discretion when planning on your " -"OpenStack cloud." -msgstr "" -"OpenStack bulutunuzu planlarken bunu tedbirli şekilde okumanızı öneririz." - -msgid "" -"We do not recommend setting the quorum to a value less than ``floor(n/2) + " -"1`` as it would likely cause a split-brain in a face of network partitions." -msgstr "" -"Yetersayıyı ``taban(n/2) + 1`` değerinden daha küçük bir değer yapmanızı " -"önermiyoruz çünkü ağ bölümlemesi durumunda ayrık-beyine sebep olabilir." - -msgid "" -"We recommend HAProxy as the load balancer, however, there are many " -"alternative load balancing solutions in the marketplace." -msgstr "" -"Yük dengeleyici olarak HAProxy öneriyoruz, ancak, markette birçok alternatif " -"yük dengeleme çözümü mevcut." - -msgid "" -"We recommend two primary architectures for making OpenStack highly available." -msgstr "" -"OpenStack'i yüksek kullanılabilir yapmak için iki ana mimari öneriyoruz." - -msgid "" -"We recommended that the maximum latency between any two controller nodes is " -"2 milliseconds. Although the cluster software can be tuned to operate at " -"higher latencies, some vendors insist on this value before agreeing to " -"support the installation." -msgstr "" -"İki kontrol düğümü arasındaki azami gecikmenin 2 milisaniye olmasını " -"önerdik. Küme yazılımı daha yüksek gecikmelerle çalışabilecek olsa da, bazı " -"üreticiler kurulumu destekleme konusunda anlaşmaya varmadan öcne bu değer " -"üzerinde durur." - -msgid "What is a cluster manager?" -msgstr "Küme yöneticisi nedir?" - -msgid "" -"When Ceph RBD is used for ephemeral volumes as well as block and image " -"storage, it supports `live migration `_ of VMs with ephemeral drives. LVM only " -"supports live migration of volume-backed VMs." -msgstr "" -"Ceph RBD geçici birimler için kullanıldığı gibi blok ve imaj depolama için " -"de kullanıldığında, sanal makinelerin `canlı göçünü `_ geçici sürücülerle " -"sağlar. LVM yalnızca birim destekli sanal makinelerin canlı göçünü destekler." - -msgid "" -"When configuring an OpenStack environment for study or demonstration " -"purposes, it is possible to turn off the quorum checking. Production systems " -"should always run with quorum enabled." 
-msgstr "" -"OpenStack ortamını çalışma ya da gösterim amacıyla yapılandırırken, " -"yetersayı kontrolünü kapatmak mümkündür. Üretim sistemleri her zaman " -"yetersayı etkin olarak çalışmalıdır." - -msgid "" -"When each cluster node starts, it checks the IP addresses given to the " -"``wsrep_cluster_address`` parameter. It then attempts to establish network " -"connectivity with a database server running there. Once it establishes a " -"connection, it attempts to join the Primary Component, requesting a state " -"transfer as needed to bring itself into sync with the cluster." -msgstr "" -"Her bir küme düğümü başladığında ``wsrep_cluster_address`` parametresine " -"verilen IP adreslerini kontrol eder. Ardından orada çalışan bir veritabanı " -"sunucusu ile ağ bağlantısı kurmaya çalışır. Bağlantı elde ettiğinde, " -"Birincil Bileşene katılmaya çalışır, kendisini küme ile eşzamanlamak için " -"bir durum aktarımı ister." - -msgid "" -"When four nodes fail simultaneously, the cluster would continue to function " -"as well. But if split to partitions of three and four nodes respectively, " -"the quorum of three would have made both sides to attempt to fence the other " -"and host resources. Without fencing enabled, it would go straight to running " -"two copies of each resource." -msgstr "" -"Dört düğüm aynı anda arızalanırsa da küme işlemeye devam edecektir. Ama üç " -"ve dört düğüm olmak üzere bölümlere ayrıldıysa, üç olan yetersayı iki " -"tarafın da karşı tarafı yalıtıp kaynakları sunmaya çalışmasına yol " -"açacaktır. Parmaklıklama etkin değilse doğrudan her bir kaynağın iki " -"kopyasının sunulduğu duruma düşülür." - -msgid "" -"When installing highly available OpenStack on VMs, be sure that your " -"hypervisor permits promiscuous mode and disables MAC address filtering on " -"the external network." -msgstr "" -"Sanal makineler üzerinde yüksek kullanılabilir OpenStack kurarken, " -"hipervizörünüzün harici ağ üzerinde MAC adres süzmeyi kapatmaya ve seçici " -"olmayan kipe izin vermesine dikkat edin." - -msgid "" -"When you finish installing and configuring the OpenStack database, you can " -"initialize the Galera Cluster." -msgstr "" -"OpenStack veritabanını kurup yapılandırmayı bitirdiğinizde, Galera Kümesini " -"ilklendirebilirsiniz." - -msgid "" -"When you have all cluster nodes started, log into the database client of any " -"cluster node and check the ``wsrep_cluster_size`` status variable again:" -msgstr "" -"Tüm küme düğümleri başladıktan sonra, herhangi bir küme düğümünden " -"veritabanı istemcisine giriş yapın ve ``wsrep_cluster_size`` durum " -"değişkenini tekrar kontrol edin:" - -msgid "" -"When you start up a cluster (all nodes down) and set ``wait_for_all`` to 1, " -"the cluster quorum is held until all nodes are online and have joined the " -"cluster for the first time. This parameter is new in Corosync 2.0." -msgstr "" -"Bir küme başlattığınızda (tüm düğümler kapalı) ve ``wait_for_all`` değerini " -"1 yaptığınızda, tüm düğümler çevrimiçi olup kümeye ilk defa katılana kadar " -"küme yetersayısı bekletilir. Bu parametre Corosync 2.0 sürümüyle yeni " -"gelmiştir." - -msgid "" -"When you use high availability, consider the hardware requirements needed " -"for your application." -msgstr "" -"Yüksek kullanılırlık kullandığınızda, uygulamanız için gerekli donanım " -"gereksinimlerini göz önüne almalısınız." 
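As a hedged illustration of the ``wait_for_all`` behaviour described above, the votequorum section of :file:`corosync.conf` might look like the following for a seven-node cluster (the ``expected_votes`` value and the ``corosync_votequorum`` provider name are assumptions for illustration, not values taken from this guide):

.. code-block:: ini

   quorum {
     # Enable the vote-based quorum service provided by the votequorum library
     provider: corosync_votequorum
     # Seven nodes in this illustrative cluster
     expected_votes: 7
     # Hold quorum until all nodes have joined at least once (Corosync 2.0 and later)
     wait_for_all: 1
   }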
- -msgid "" -"While SYS-V init replacements like systemd can provide deterministic " -"recovery of a complex stack of services, the recovery is limited to one " -"machine and lacks the context of what is happening on other machines. This " -"context is crucial to determine the difference between a local failure, and " -"clean startup and recovery after a total site failure." -msgstr "" -"Systemd gibi SYS-V başlangıcının yerine geçen uygulamalar karmaşık servis " -"yığınlarının belirleyici şekilde kurtarılmasını sağlarken, kurtarma bir " -"makineyle sınırlıdır ve diğer makinelerde ne olduğuyla ilgili içeriğe sahip " -"değildir. İçerik, yerel bir arıza, temiz başlangıç ve toplu site arızasının " -"kurtarılması arasındaki farkı belirlemek için çok önemlidir." - -msgid "" -"While all of the configuration parameters available to the standard MySQL, " -"MariaDB, or Percona XtraDB database servers are available in Galera Cluster, " -"there are some that you must define an outset to avoid conflict or " -"unexpected behavior." -msgstr "" -"Standart MySQL, MariaDB veya Percona XtraDB veritabanı sunucularında " -"kullanılabilir tüm yapılandırma parametreleri Galera Kümesinde de " -"kullanılabilir olsa da, çakışmayı ya da beklenmedik davranışları önlemek " -"için kullanmamanız gereken bazıları vardır." - -msgid "" -"While the application can still run after the failure of several instances, " -"it may not have sufficient capacity to serve the required volume of " -"requests. A cluster can automatically recover failed instances to prevent " -"additional load induced failures." -msgstr "" -"Uygulama birçok sunucunun arızası durumunda da çalışabilse de, istek " -"yoğunluğuna yanıt verecek yeterli kapasiteye sahip olmayabilir. Küme arızalı " -"sunucuları otomatik olarak kurtarıp ek yüke dayalı arızaları önleyebilir." - -msgid "" -"With ``secauth`` enabled, Corosync nodes mutually authenticates using a 128-" -"byte shared secret stored in the :file:`/etc/corosync/authkey` file. This " -"can be generated with the :command:`corosync-keygen` utility. Cluster " -"communications are encrypted when using ``secauth``." -msgstr "" -"``secauth`` etkin olduğunda, Corosync düğümleri karşılıklı olarak :file:`/" -"etc/corosync/authkey` dosyasında bulunan 128-bayt paylaşımlı bir gizi " -"kullanarak kimlik doğrular. Bu :command:`corosync-keygen` aracı ile " -"üretilebilir. Küme iletişimi ``secauth`` kullanıldığında şifrelidir." - -msgid "" -"With this in mind, some vendors are restricting Pacemaker's use to services " -"that must operate in an active/passive mode (such as ``cinder-volume``), " -"those with multiple states (for example, Galera), and those with complex " -"bootstrapping procedures (such as RabbitMQ)." -msgstr "" -"Bunun yanında bazı üreticiler Pacemaker'in etkin/pasif kipinde çalışması " -"gereken servislerlere (``cinder-volume`` gibi), çoklu duruma sahip olanlara " -"(örneğin, Galera), ve karmaşık önyükleme yordamlarına sahip olanlarına " -"(örneğin RabbitMQ) kullanımını kısıtlıyorlar." - -msgid "" -"Within the ``nodelist`` directive, it is possible to specify specific " -"information about the nodes in the cluster. The directive can contain only " -"the node sub-directive, which specifies every node that should be a member " -"of the membership, and where non-default options are needed. Every node must " -"have at least the ``ring0_addr`` field filled." -msgstr "" -"``nodelist`` yönergesi içinde, küme içindeki düğümlerle ilgili belirli " -"bilgileri belirtmek mümkündür. 
Yönerge yalnızca düğüm alt-yönergesini " -"içerebilir, bu da aboneliğin üyesi olması gereken ve varsayılan dışında " -"seçeneklerin gerektiği her düğümü belirtir. Her düğümün en azından " -"``ring0_addr`` alanı dolu olmalıdır." - -msgid "" -"Work is in progress on a unified approach, which combines the best aspects " -"of existing upstream solutions. More details are available on `the HA VMs " -"user story wiki `_." -msgstr "" -"İş birleştirilmiş bir yaklaşımla sürmektedir, bu da mevcut çözümlerin en iyi " -"yönlerini birleştirir. Daha fazla ayrıntı `Yüksek kullanılırlıklı sanal " -"makineler kullanıcı hikayesi wiki'sinde `_ bulunabilir." - -msgid "" -"You can achieve high availability for the OpenStack database in many " -"different ways, depending on the type of database that you want to use. " -"There are three implementations of Galera Cluster available to you:" -msgstr "" -"OpenStack veritabanı için yüksek kullanılırlığı kullanmak istediğiniz " -"veritabanı türüne göre farklı yollarla elde edebilirsiniz. " -"Kullanabileceğiniz üç Galera Kümesi uygulaması bulunmaktadır:" - -msgid "" -"You can also ensure the availability by other means, using Keepalived or " -"Pacemaker." -msgstr "" -"Kullanılırlığı Keepalived veya Pacemaker kullanarak başka yollarla da " -"sağlayabilirsiniz." - -msgid "" -"You can have up to 16 cluster members (this is currently limited by the " -"ability of corosync to scale higher). In extreme cases, 32 and even up to 64 " -"nodes could be possible. However, this is not well tested." -msgstr "" -"En fazla 16 küme ögesine sahip olabilirsiniz (bu şu anda corosync'in daha " -"yukarı ölçekleme yeteneği tarafından kısıtlanıyor). Uç durumlarda, 32 ve " -"hatta 64 düğüm mümkün olabilir. Ancak bu iyi denenmemiştir." - -msgid "" -"You can now add the Pacemaker configuration for Block Storage API resource. " -"Connect to the Pacemaker cluster with the :command:`crm configure` command " -"and add the following cluster resources:" -msgstr "" -"Artık Blok Depolama API kaynağı için Pacemaker yapılandırması " -"ekleyebilirsiniz. :command:`crm configure` komutu ile Pacemaker kümesine " -"bağlanın ve aşağıdaki küme kaynaklarını ekleyin:" - -msgid "" -"You can now check the ``corosync`` connectivity with one of these tools." -msgstr "" -"Artık ``corosync`` bağlantısını şu araçlardan biriyle kontrol edebilirsiniz." - -msgid "" -"You can read more about these concerns on the `Red Hat Bugzilla `_ and there is a `psuedo " -"roadmap `_ " -"for addressing them upstream." -msgstr "" -"Bu endişelerle ilgili daha fazla bilgiyi `Red Hat Bugzilla'sında `_ okuyabilirsiniz, ayrıca nasıl " -"çözülecekleriyle ilgili bir `kalıp yol haritası `_ bulunur." - -msgid "" -"You can take periodic snap shots throughout the installation process and " -"roll back to a working configuration in the event of a problem." -msgstr "" -"Kurulum sürecinde aralıklarla anlık görüntüler alabilir ve sorun olması " -"durumunda çalışan bir yapılandırmaya dönebilirsiniz." - -msgid "You can use the `ping` command to find the latency between two servers." -msgstr "" -"İki sunucu arasındaki gecikmeyi bulmak için `ping` komutunu " -"kullanabilirsiniz." - -msgid "" -"You must also create the OpenStack Image API endpoint with this IP address. " -"If you are using both private and public IP addresses, create two virtual IP " -"addresses and define your endpoint. For example:" -msgstr "" -"Ayrıca OpenStack İmaj API uç noktasını da bu adresle oluşturmalısınız. 
Hem " -"özel hem açık IP adresleri kullanıyorsanız, iki sanal IP adresi oluşturup uç " -"noktanızı tanımlayın. Örneğin:" - -msgid "" -"You must configure a supported Tooz driver for the HA deployment of the " -"Telemetry services." -msgstr "" -"Telemetri servislerinin HA kurulumu için desteklenen bir Tooz sürücüsü " -"yapılandırmalısınız." - -msgid "You must create the Shared File Systems API endpoint with this IP." -msgstr "" -"Paylaşımlı Dosya Sistemleri API uç noktasını bu IP ile oluşturmalısınız." - -msgid "" -"You must select and assign a virtual IP address (VIP) that can freely float " -"between cluster nodes." -msgstr "" -"Küme düğümleri arasında serbestçe kayabilen bir sanal IP adresi (VIP) " -"seçmeli ve atamalısınız." - -msgid "" -"You must use the same name on every cluster node. The connection fails when " -"this value does not match." -msgstr "" -"Her küme düğümünde aynı ismi kullanmalısınız. Bu değer eşleşmediğinde " -"bağlantı başarısız olur." - -msgid "" -"You only need to do this on one cluster node. Galera Cluster replicates the " -"user to all the others." -msgstr "" -"Bunu yalnızca tek bir küme düğümünde yapmanız gerekir. Galera Kümesi " -"kullanıcıyı diğerlerine çoğaltır." - -msgid "" -"You should see a ``status=joined`` entry for each of your constituent " -"cluster nodes." -msgstr "" -"Kurucu küme düğümlerinizin her biri için bir ``durum=katıldı`` girdisi " -"görmelisiniz." - -msgid "" -"You will need to address high availability concerns for any applications " -"software that you run on your OpenStack environment. The important thing is " -"to make sure that your services are redundant and available. How you achieve " -"that is up to you." -msgstr "" -"OpenStack ortamınızda çalıştırdığınız tüm uygulama yazılımları için yüksek " -"kullanılırlık endişelerini gidermeniz gerekir. Önemli olan servislerinizin " -"yedekli ve kullanılabilir olduğundan emin olmaktır. Bunu nasıl elde " -"edeceğiniz size kalmış." - -msgid "" -"You would choose this option if you prefer to have fewer but more powerful " -"boxes." -msgstr "" -"Bu seçeneği daha az ama daha güçlü kutuları tercih ettiğinizde seçmeniz " -"mantıklıdır." - -msgid "" -"You would choose this option if you prefer to have more but less powerful " -"boxes." -msgstr "" -"Bu seçeneği daha fazla sayıda ama daha güçsüz kutuları tercih ettiğinizde " -"seçmeniz mantıklıdır." - -msgid "" -"Your OpenStack services must now point their Block Storage API configuration " -"to the highly available, virtual cluster IP address rather than a Block " -"Storage API server’s physical IP address as you would for a non-HA " -"environment." -msgstr "" -"OpenStack servisleriniz artık Blok Depolama API yapılandırmalarını yüksek " -"kullanılabilir olmayan bir ortamdaki gibi Blok Depolama API sunucusunun IP " -"adresi yerine yüksek kullanılır, sanal küme IP adresine çevirmelidir." - -msgid "" -"Your OpenStack services must now point their OpenStack Image API " -"configuration to the highly available, virtual cluster IP address instead of " -"pointing to the physical IP address of an OpenStack Image API server as you " -"would in a non-HA cluster." -msgstr "" -"OpenStack servisleriniz artık OpenStack İmaj API yapılandırmasını yüksek " -"kullanılır olmayan bir ortamdaki gibi OpenStack İmaj API sunucusunun " -"fiziksel IP adresine değil yüksek kullanılır, sanal küme IP adresine " -"çevirmelidir." 
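For the endpoint creation mentioned above, a sketch of what the commands might look like (the region name, the ``PUBLIC_VIP`` placeholder, and port ``9292`` are assumptions for illustration; ``10.0.0.11`` is the example API VIP used elsewhere in this guide):

.. code-block:: console

   $ openstack endpoint create --region RegionOne image public http://PUBLIC_VIP:9292
   $ openstack endpoint create --region RegionOne image internal http://10.0.0.11:9292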
- -msgid "" -"Your OpenStack services must now point their Shared File Systems API " -"configuration to the highly available, virtual cluster IP address rather " -"than a Shared File Systems API server’s physical IP address as you would for " -"a non-HA environment." -msgstr "" -"OpenStack servisleriniz artık Paylaşımlı Dosya Sistemleri API " -"yapılandırmalarını yüksek kullanılabilir olmayan bir ortamdaki gibi " -"Paylaşımlı Dosya Sistemleri API sunucusunun fiziksel IP adresi yerine yüksek " -"kullanılabilir, sanal küme IP adresine çevirmelidir." - -msgid "" -"Your OpenStack services now point their OpenStack Identity configuration to " -"the highly available virtual cluster IP address." -msgstr "" -"OpenStack servisleriniz artık OpenStack Kimlik yapılandırmalarını yüksek " -"kullanılırlıklı sanal küme IP adresine işaret ettiriyor." - -msgid "[TODO: need more discussion of these parameters]" -msgstr "[YAPILACAK: bu parametrelerle ilgili daha fazla tartışmak gerek]" - -msgid "" -"`Ceph RBD `_ is an innately high availability storage " -"back end. It creates a storage cluster with multiple nodes that communicate " -"with each other to replicate and redistribute data dynamically. A Ceph RBD " -"storage cluster provides a single shared set of storage nodes that can " -"handle all classes of persistent and ephemeral data (glance, cinder, and " -"nova) that are required for OpenStack instances." -msgstr "" -"`Ceph RBD `_ doğuştan yüksek kullanılabilirliğe sahip " -"depolama arka ucudur. Veriyi dinamik olarak birbiri arasında yeniden dağıtan " -"birçok düğümden oluşan bir depolama kümesi oluşturur. Bir Ceph RBD depolama " -"kümesi OpenStack sunucuları tarafından ihtiyaç duyulan her sınıf kalıcı ya " -"da geçici (glance, cinder ve nova) veriyi ele alabilen paylaşımlı depolama " -"düğümleri kümesini sağlar." - -msgid "`Clustering Guide `_" -msgstr "`Kümeleme Kılavuzu `_" - -msgid "`Debian and Ubuntu `_" -msgstr "`Debian ve Ubuntu `_" - -msgid "" -"`Galera Cluster for MySQL `_: The MySQL reference " -"implementation from Codership, Oy." -msgstr "" -"`MySQL için Galera Kümesi `_: Codership, Oy'den " -"MySQL başvuru uygulaması." - -msgid "`Highly Available Queues `_" -msgstr "`Yüksek Kullanılır Kuyruklar `_" - -msgid "" -"`IMPORTANT: MySQL Galera does *not* support SELECT ... FOR UPDATE `_" -msgstr "" -"`ÖNEMLİ: MySQL Galera SELECT ... FOR UPDATE *desteklemez* `_" - -msgid "" -"`MariaDB Galera Cluster `_: The MariaDB implementation " -"of Galera Cluster, which is commonly supported in environments based on Red " -"Hat distributions." -msgstr "" -"`MariaDB Galera Kümesi `_: Galera Kümesinin MariaDB " -"uygulaması, genellikle Red Hat dağıtımı tabanlı ortamlarda desteklenir." - -msgid "`Memcached `_:" -msgstr "`Memcached `_:" - -msgid "" -"`OCF RAs `_, as used by Red Hat and SUSE" -msgstr "" -"Red Hat ve SUSE tarafından kullanılan şekliyle `OCF RA'ları `_" - -msgid "" -"`Pacemaker `_ cluster stack is a state-of-the-art " -"high availability and load balancing stack for the Linux platform. Pacemaker " -"is used to make OpenStack infrastructure highly available." -msgstr "" -"`Pacemaker `_ küme yığını Linux platformu için " -"modern bir yüksek kullanılırlık ve yük dengeleme yığınıdır. Pacemaker " -"OpenStack alt yapısını yüksek kullanılır yapmak için kullanılır." - -msgid "" -"`Percona XtraDB Cluster `_: The XtraDB " -"implementation of Galera Cluster from Percona." -msgstr "" -"`Percona XtraDB Kümesi `_: Percona'dan Galera " -"Kümesi için XtraDB uygulaması." 
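Where a supported Tooz driver is required, as noted above, the coordination back end is typically declared in the service configuration. A hedged sketch using one of the drivers listed here (the ``[coordination]`` section name, the ``backend_url`` option, and the Redis URL are assumptions for illustration, not values from this guide):

.. code-block:: ini

   [coordination]
   # Point the Tooz coordination layer at a Redis back end (illustrative URL)
   backend_url = redis://controller1:6379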
- -msgid "" -"`RPM based `_ (RHEL, Fedora, " -"CentOS, openSUSE)" -msgstr "" -"`RPM tabanlı `_ (RHEL, Fedora, " -"CentOS, openSUSE)" - -msgid "`Redis `_:" -msgstr "`Redis `_:" - -msgid "" -"`Understanding reservations, concurrency, and locking in Nova `_" -msgstr "" -"`Nova'da yer ayırmaların, aynı anda kullanımın ve kilitlerin anlaşılması " -"`_" - -msgid "`Zookeeper `_:" -msgstr "`Zookeeper `_:" - -msgid "``crmsh``" -msgstr "``crmsh``" - -msgid "" -"``last_man_standing_window`` specifies the time, in milliseconds, required " -"to recalculate quorum after one or more hosts have been lost from the " -"cluster. To perform a new quorum recalculation, the cluster must have quorum " -"for at least the interval specified for ``last_man_standing_window``. The " -"default is 10000ms." -msgstr "" -"``last_man_standing_window`` bir ya da daha fazla sunucu kümeden " -"kaybolduğunda yetersayıyı tekrar hesaplamak için gerekli sürenin milisaniye " -"cinsinden değeridir. Yeni bir yetersayı hesaplaması yapmak için, küme en az " -"``last_man_standing_window`` değerinde belirtilen aralık kadar beklemelidir. " -"Öntanımlı değer 10000 ms'dir." - -msgid "" -"``nodeid`` is optional when using IPv4 and required when using IPv6. This is " -"a 32-bit value specifying the node identifier delivered to the cluster " -"membership service. If this is not specified with IPv4, the node ID is " -"determined from the 32-bit IP address of the system to which the system is " -"bound with ring identifier of 0. The node identifier value of zero is " -"reserved and should not be used." -msgstr "" -"``nodeid`` IPv4 kullanırken isteğe bağlı, IPv6 kullanırken gereklidir. Küme " -"üyelik servisine gönderilen düğüm tanımlayıcısını belirten 32-bit bir " -"değerdir. IPv4 ile bu değer belirtilmezse, düğüm kimliği sistemin 0 halka " -"tanımlayıcısı ile bağlı bulunduğu sistemin 32-bit IP adresinden karar " -"verilir. Düğüm tanımlayıcı değeri olan sıfır ayrılmıştır, ve " -"kullanılmamalıdır." - -msgid "``pcs``" -msgstr "``pcs``" - -msgid "" -"``ring{X}_addr`` specifies the IP address of one of the nodes. ``{X}`` is " -"the ring number." -msgstr "" -"``ring{X}_addr`` düğümlerden birinin IP adresini belirtir. ``{X}`` halka " -"numarasıdır." 
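Putting the ``ring0_addr`` and ``nodeid`` fields described above together, a ``nodelist`` entry in :file:`corosync.conf` might look like the following (the second address is illustrative; ``10.0.0.12`` is the controller management address used elsewhere in this guide):

.. code-block:: ini

   nodelist {
     node {
       ring0_addr: 10.0.0.12
       nodeid: 1
     }
     node {
       ring0_addr: 10.0.0.13
       nodeid: 2
     }
   }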
- -msgid "" -"`a mistral-based auto-recovery workflow `_, by Intel" -msgstr "" -"Intel tarafından `mistral-tabanlı otomatik kurtarma iş akışı `_" - -msgid "`corosync`" -msgstr "`corosync`" - -msgid "`fence-agents` (CentOS or RHEL) or cluster-glue" -msgstr "`fence-agents` (CentOS veya RHEL) veya cluster-glue" - -msgid "`libqb0`" -msgstr "`libqb0`" - -msgid "`masakari `_, by NTT" -msgstr "NTT tarafından `masakari `_" - -msgid "`pacemaker`" -msgstr "`pacemaker`" - -msgid "`pcs` (CentOS or RHEL) or crmsh" -msgstr "`pcs` (CentOS veya RHEL) veya crmsh" - -msgid "`resource-agents`" -msgstr "`resource-agents`" - -msgid "allow_automatic_l3agent_failover" -msgstr "allow_automatic_l3agent_failover" - -msgid "compute node" -msgstr "hesaplama düğümü" - -msgid "controller node" -msgstr "kontrol düğümü" - -msgid "l3_ha" -msgstr "l3_ha" - -msgid "max_l3_agents_per_router" -msgstr "max_l3_agents_per_router" - -msgid "min_l3_agents_per_router" -msgstr "min_l3_agents_per_router" diff --git a/doc/ha-guide/source/networking-ha-dhcp.rst b/doc/ha-guide/source/networking-ha-dhcp.rst deleted file mode 100644 index b59f282973..0000000000 --- a/doc/ha-guide/source/networking-ha-dhcp.rst +++ /dev/null @@ -1,13 +0,0 @@ -========================= -Run Networking DHCP agent -========================= - -The OpenStack Networking (neutron) service has a scheduler that lets you run -multiple agents across nodes. The DHCP agent can be natively highly available. - -To configure the number of DHCP agents per network, modify the -``dhcp_agents_per_network`` parameter in the :file:`/etc/neutron/neutron.conf` -file. By default this is set to 1. To achieve high availability, assign more -than one DHCP agent per network. For more information, see -`High-availability for DHCP -`_. diff --git a/doc/ha-guide/source/networking-ha-l3.rst b/doc/ha-guide/source/networking-ha-l3.rst deleted file mode 100644 index 231b262b72..0000000000 --- a/doc/ha-guide/source/networking-ha-l3.rst +++ /dev/null @@ -1,37 +0,0 @@ -======================= -Run Networking L3 agent -======================= - -The Networking (neutron) service L3 agent is scalable, due to the scheduler -that supports Virtual Router Redundancy Protocol (VRRP) to distribute virtual -routers across multiple nodes. For more information about the VRRP and -keepalived, see `Linux bridge: High availability using VRRP -`_ -and `Open vSwitch: High availability using VRRP -`_. - -To enable high availability for configured routers, edit the -:file:`/etc/neutron/neutron.conf` file to set the following values: - -.. tabularcolumns:: |l|l|L| -.. list-table:: /etc/neutron/neutron.conf parameters for high availability - :widths: 15 10 30 - :header-rows: 1 - - * - Parameter - - Value - - Description - * - l3_ha - - True - - All routers are highly available by default. - * - allow_automatic_l3agent_failover - - True - - Set automatic L3 agent failover for routers - * - max_l3_agents_per_router - - 2 or more - - Maximum number of network nodes to use for the HA router. - * - min_l3_agents_per_router - - 2 or more - - Minimum number of network nodes to use for the HA router. - A new router can be created only if this number - of network nodes are available. diff --git a/doc/ha-guide/source/networking-ha.rst b/doc/ha-guide/source/networking-ha.rst deleted file mode 100644 index 81615ca83e..0000000000 --- a/doc/ha-guide/source/networking-ha.rst +++ /dev/null @@ -1,35 +0,0 @@ -=================================== -Configuring the networking services -=================================== - -.. 
toctree:: - :maxdepth: 2 - - networking-ha-dhcp.rst - networking-ha-l3.rst - -Configure networking on each node. See the basic information -about configuring networking in the *Networking service* -section of the -`Install Guides `_, -depending on your distribution. - -OpenStack network nodes contain: - -- :doc:`Networking DHCP agent` -- :doc:`Neutron L3 agent` -- Networking L2 agent - - .. note:: - - The L2 agent cannot be distributed and highly available. - Instead, it must be installed on each data forwarding node - to control the virtual network driver such as Open vSwitch - or Linux Bridge. One L2 agent runs per node and controls its - virtual interfaces. - -.. note:: - - For Liberty, you can not have the standalone network nodes. - The Networking services are run on the controller nodes. - In this guide, the term `network nodes` is used for convenience. diff --git a/doc/ha-guide/source/shared-database-configure.rst b/doc/ha-guide/source/shared-database-configure.rst deleted file mode 100644 index 808b5576fc..0000000000 --- a/doc/ha-guide/source/shared-database-configure.rst +++ /dev/null @@ -1,301 +0,0 @@ -============== -Configuration -============== - -Before you launch Galera Cluster, you need to configure the server -and the database to operate as part of the cluster. - -Configuring the server -~~~~~~~~~~~~~~~~~~~~~~~ - -Certain services running on the underlying operating system of your -OpenStack database may block Galera Cluster from normal operation -or prevent ``mysqld`` from achieving network connectivity with the cluster. - -Firewall ---------- - -Galera Cluster requires that you open the following ports to network traffic: - -- On ``3306``, Galera Cluster uses TCP for database client connections - and State Snapshot Transfers methods that require the client, - (that is, ``mysqldump``). -- On ``4567``, Galera Cluster uses TCP for replication traffic. Multicast - replication uses both TCP and UDP on this port. -- On ``4568``, Galera Cluster uses TCP for Incremental State Transfers. -- On ``4444``, Galera Cluster uses TCP for all other State Snapshot Transfer - methods. - -.. seealso:: - - For more information on firewalls, see `firewalls and default ports - `_ - in OpenStack Administrator Guide. - -This can be achieved using the :command:`iptables` command: - -.. code-block:: console - - # iptables --append INPUT --in-interface eth0 \ - --protocol tcp --match tcp --dport ${PORT} \ - --source ${NODE-IP-ADDRESS} --jump ACCEPT - -Make sure to save the changes once you are done. This will vary -depending on your distribution: - -- For `Ubuntu `_ -- For `Fedora `_ - -Alternatively, make modifications using the ``firewall-cmd`` utility for -FirewallD that is available on many Linux distributions: - -.. code-block:: console - - # firewall-cmd --add-service=mysql --permanent - # firewall-cmd --add-port=3306/tcp --permanent - -SELinux --------- - -Security-Enhanced Linux is a kernel module for improving security on Linux -operating systems. It is commonly enabled and configured by default on -Red Hat-based distributions. In the context of Galera Cluster, systems with -SELinux may block the database service, keep it from starting, or prevent it -from establishing network connections with the cluster. - -To configure SELinux to permit Galera Cluster to operate, you may need -to use the ``semanage`` utility to open the ports it uses. For -example: - -.. 
code-block:: console - - # semanage port -a -t mysqld_port_t -p tcp 3306 - -Older versions of some distributions, which do not have an up-to-date -policy for securing Galera, may also require SELinux to be more -relaxed about database access and actions: - -.. code-block:: console - - # semanage permissive -a mysqld_t - -.. note:: - - Bear in mind, leaving SELinux in permissive mode is not a good - security practice. Over the longer term, you need to develop a - security policy for Galera Cluster and then switch SELinux back - into enforcing mode. - - For more information on configuring SELinux to work with - Galera Cluster, see the `SELinux Documentation - `_ - -AppArmor ---------- - -Application Armor is a kernel module for improving security on Linux -operating systems. It is developed by Canonical and commonly used on -Ubuntu-based distributions. In the context of Galera Cluster, systems -with AppArmor may block the database service from operating normally. - -To configure AppArmor to work with Galera Cluster, complete the -following steps on each cluster node: - -#. Create a symbolic link for the database server in the ``disable`` directory: - - .. code-block:: console - - # ln -s /etc/apparmor.d/usr /etc/apparmor.d/disable/.sbin.mysqld - -#. Restart AppArmor. For servers that use ``init``, run the following command: - - .. code-block:: console - - # service apparmor restart - - For servers that use ``systemd``, run the following command: - - .. code-block:: console - - # systemctl restart apparmor - -AppArmor now permits Galera Cluster to operate. - -Database configuration -~~~~~~~~~~~~~~~~~~~~~~~ - -MySQL databases, including MariaDB and Percona XtraDB, manage their -configurations using a ``my.cnf`` file, which is typically located in the -``/etc`` directory. Configuration options available in these databases are -also available in Galera Cluster, with some restrictions and several -additions. - -.. code-block:: ini - - [mysqld] - datadir=/var/lib/mysql - socket=/var/lib/mysql/mysql.sock - user=mysql - binlog_format=ROW - bind-address=10.0.0.12 - - # InnoDB Configuration - default_storage_engine=innodb - innodb_autoinc_lock_mode=2 - innodb_flush_log_at_trx_commit=0 - innodb_buffer_pool_size=122M - - # Galera Cluster Configuration - wsrep_provider=/usr/lib/libgalera_smm.so - wsrep_provider_options="pc.recovery=TRUE;gcache.size=300M" - wsrep_cluster_name="my_example_cluster" - wsrep_cluster_address="gcomm://GALERA1-IP,GALERA2-IP,GALERA3-IP" - wsrep_sst_method=rsync - - -Configuring mysqld -------------------- - -While all of the configuration parameters available to the standard MySQL, -MariaDB, or Percona XtraDB database servers are available in Galera Cluster, -there are some that you must define an outset to avoid conflict or -unexpected behavior. - -- Ensure that the database server is not bound only to the localhost: - ``127.0.0.1``. Also, do not bind it to ``0.0.0.0``. Binding to the localhost - or ``0.0.0.0`` makes ``mySQL`` bind to all IP addresses on the machine, - including the virtual IP address causing ``HAProxy`` not to start. Instead, - bind to the management IP address of the controller node to enable access by - other nodes through the management network: - - .. code-block:: ini - - bind-address=10.0.0.12 - -- Ensure that the binary log format is set to use row-level replication, - as opposed to statement-level replication: - - .. 
code-block:: ini - - binlog_format=ROW - - -Configuring InnoDB -------------------- - -Galera Cluster does not support non-transactional storage engines and -requires that you use InnoDB by default. There are some additional -parameters that you must define to avoid conflicts. - -- Ensure that the default storage engine is set to InnoDB: - - .. code-block:: ini - - default_storage_engine=InnoDB - -- Ensure that the InnoDB locking mode for generating auto-increment values - is set to ``2``, which is the interleaved locking mode: - - .. code-block:: ini - - innodb_autoinc_lock_mode=2 - - Do not change this value. Other modes may cause ``INSERT`` statements - on tables with auto-increment columns to fail as well as unresolved - deadlocks that leave the system unresponsive. - -- Ensure that the InnoDB log buffer is written to file once per second, - rather than on each commit, to improve performance: - - .. code-block:: ini - - innodb_flush_log_at_trx_commit=0 - - Setting this parameter to ``0`` or ``2`` can improve - performance, but it introduces certain dangers. Operating system failures can - erase the last second of transactions. While you can recover this data - from another node, if the cluster goes down at the same time - (in the event of a data center power outage), you lose this data permanently. - -- Define the InnoDB memory buffer pool size. The default value is 128 MB, - but to compensate for Galera Cluster's additional memory usage, scale - your usual value back by 5%: - - .. code-block:: ini - - innodb_buffer_pool_size=122M - - -Configuring wsrep replication ------------------------------- - -Galera Cluster configuration parameters all have the ``wsrep_`` prefix. -You must define the following parameters for each cluster node in your -OpenStack database. - -- **wsrep Provider**: The Galera Replication Plugin serves as the ``wsrep`` - provider for Galera Cluster. It is installed on your system as the - ``libgalera_smm.so`` file. Define the path to this file in - your ``my.cnf``: - - .. code-block:: ini - - wsrep_provider="/usr/lib/libgalera_smm.so" - -- **Cluster Name**: Define an arbitrary name for your cluster. - - .. code-block:: ini - - wsrep_cluster_name="my_example_cluster" - - You must use the same name on every cluster node. The connection fails - when this value does not match. - -- **Cluster Address**: List the IP addresses for each cluster node. - - .. code-block:: ini - - wsrep_cluster_address="gcomm://192.168.1.1,192.168.1.2,192.168.1.3" - - Replace the IP addresses given here with comma-separated list of each - OpenStack database in your cluster. - -- **Node Name**: Define the logical name of the cluster node. - - .. code-block:: ini - - wsrep_node_name="Galera1" - -- **Node Address**: Define the IP address of the cluster node. - - .. code-block:: ini - - wsrep_node_address="192.168.1.1" - -Additional parameters -^^^^^^^^^^^^^^^^^^^^^^ - -For a complete list of the available parameters, run the -``SHOW VARIABLES`` command from within the database client: - -.. code-block:: mysql - - SHOW VARIABLES LIKE 'wsrep_%'; - - +------------------------------+-------+ - | Variable_name | Value | - +------------------------------+-------+ - | wsrep_auto_increment_control | ON | - +------------------------------+-------+ - | wsrep_causal_reads | OFF | - +------------------------------+-------+ - | wsrep_certify_nonPK | ON | - +------------------------------+-------+ - | ... | ... 
| - +------------------------------+-------+ - | wsrep_sync_wait | 0 | - +------------------------------+-------+ - -For documentation about these parameters, ``wsrep`` provider option, and status -variables available in Galera Cluster, see the Galera cluster `Reference -`_. diff --git a/doc/ha-guide/source/shared-database-manage.rst b/doc/ha-guide/source/shared-database-manage.rst deleted file mode 100644 index ec5ef304a9..0000000000 --- a/doc/ha-guide/source/shared-database-manage.rst +++ /dev/null @@ -1,249 +0,0 @@ -========== -Management -========== - -When you finish installing and configuring the OpenStack database, -you can initialize the Galera Cluster. - -Prerequisites -~~~~~~~~~~~~~ - -- Database hosts with Galera Cluster installed -- A minimum of three hosts -- No firewalls between the hosts -- SELinux and AppArmor set to permit access to ``mysqld`` -- The correct path to ``libgalera_smm.so`` given to the - ``wsrep_provider`` parameter - -Initializing the cluster -~~~~~~~~~~~~~~~~~~~~~~~~~ - -In the Galera Cluster, the Primary Component is the cluster of database -servers that replicate into each other. In the event that a -cluster node loses connectivity with the Primary Component, it -defaults into a non-operational state, to avoid creating or serving -inconsistent data. - -By default, cluster nodes do not start as part of a Primary Component. -In the Primary Component, replication and state transfers bring all databases -to the same state. - -To start the cluster, complete the following steps: - -#. Initialize the Primary Component on one cluster node. For - servers that use ``init``, run the following command: - - .. code-block:: console - - # service mysql start --wsrep-new-cluster - - For servers that use ``systemd``, run the following command: - - .. code-block:: console - - # systemctl start mariadb --wsrep-new-cluster - -#. Once the database server starts, check the cluster status using - the ``wsrep_cluster_size`` status variable. From the database - client, run the following command: - - .. code-block:: mysql - - SHOW STATUS LIKE 'wsrep_cluster_size'; - - +--------------------+-------+ - | Variable_name | Value | - +--------------------+-------+ - | wsrep_cluster_size | 1 | - +--------------------+-------+ - -#. Start the database server on all other cluster nodes. For - servers that use ``init``, run the following command: - - .. code-block:: console - - # service mysql start - - For servers that use ``systemd``, run the following command: - - .. code-block:: console - - # systemctl start mariadb - -#. When you have all cluster nodes started, log into the database - client of any cluster node and check the ``wsrep_cluster_size`` - status variable again: - - .. code-block:: mysql - - SHOW STATUS LIKE 'wsrep_cluster_size'; - - +--------------------+-------+ - | Variable_name | Value | - +--------------------+-------+ - | wsrep_cluster_size | 3 | - +--------------------+-------+ - -When each cluster node starts, it checks the IP addresses given to -the ``wsrep_cluster_address`` parameter. It then attempts to establish -network connectivity with a database server running there. Once it -establishes a connection, it attempts to join the Primary -Component, requesting a state transfer as needed to bring itself -into sync with the cluster. - -.. note:: - - In the event that you need to restart any cluster node, you can do - so. 
When the database server comes back it, it establishes - connectivity with the Primary Component and updates itself to any - changes it may have missed while down. - -Restarting the cluster ------------------------ - -Individual cluster nodes can stop and be restarted without issue. -When a database loses its connection or restarts, the Galera Cluster -brings it back into sync once it reestablishes connection with the -Primary Component. In the event that you need to restart the -entire cluster, identify the most advanced cluster node and -initialize the Primary Component on that node. - -To find the most advanced cluster node, you need to check the -sequence numbers, or the ``seqnos``, on the last committed transaction for -each. You can find this by viewing ``grastate.dat`` file in -database directory: - -.. code-block:: console - - $ cat /path/to/datadir/grastate.dat - - # Galera saved state - version: 3.8 - uuid: 5ee99582-bb8d-11e2-b8e3-23de375c1d30 - seqno: 8204503945773 - -Alternatively, if the database server is running, use the -``wsrep_last_committed`` status variable: - -.. code-block:: mysql - - SHOW STATUS LIKE 'wsrep_last_committed'; - - +----------------------+--------+ - | Variable_name | Value | - +----------------------+--------+ - | wsrep_last_committed | 409745 | - +----------------------+--------+ - -This value increments with each transaction, so the most advanced -node has the highest sequence number and therefore is the most up to date. - -Configuration tips -~~~~~~~~~~~~~~~~~~~ - -Deployment strategies ----------------------- - -Galera can be configured using one of the following -strategies: - -- Each instance has its own IP address: - - OpenStack services are configured with the list of these IP - addresses so they can select one of the addresses from those - available. - -- Galera runs behind HAProxy: - - HAProxy load balances incoming requests and exposes just one IP - address for all the clients. - - Galera synchronous replication guarantees a zero slave lag. The - failover procedure completes once HAProxy detects that the active - back end has gone down and switches to the backup one, which is - then marked as ``UP``. If no back ends are ``UP``, the failover - procedure finishes only when the Galera Cluster has been - successfully reassembled. The SLA is normally no more than 5 - minutes. - -- Use MySQL/Galera in active/passive mode to avoid deadlocks on - ``SELECT ... FOR UPDATE`` type queries (used, for example, by nova - and neutron). This issue is discussed in the following: - - - `IMPORTANT: MySQL Galera does *not* support SELECT ... FOR UPDATE - `_ - - `Understanding reservations, concurrency, and locking in Nova - `_ - -Configuring HAProxy --------------------- - -If you use HAProxy as a load-balancing client to provide access to the -Galera Cluster, as described in the :doc:`controller-ha-haproxy`, you can -use the ``clustercheck`` utility to improve health checks. - -#. Create a configuration file for ``clustercheck`` at - ``/etc/sysconfig/clustercheck``: - - .. code-block:: ini - - MYSQL_USERNAME="clustercheck_user" - MYSQL_PASSWORD="my_clustercheck_password" - MYSQL_HOST="localhost" - MYSQL_PORT="3306" - -.. note:: - For Ubuntu 16.04.1: Create a configuration file for ``clustercheck`` - at ``/etc/default/clustercheck``. - -#. Log in to the database client and grant the ``clustercheck`` user - ``PROCESS`` privileges: - - .. 
code-block:: mysql - - GRANT PROCESS ON *.* TO 'clustercheck_user'@'localhost' - IDENTIFIED BY 'my_clustercheck_password'; - - FLUSH PRIVILEGES; - - You only need to do this on one cluster node. Galera Cluster - replicates the user to all the others. - -#. Create a configuration file for the HAProxy monitor service, at - ``/etc/xinetd.d/galera-monitor``: - - .. code-block:: none - - service galera-monitor - { - port = 9200 - disable = no - socket_type = stream - protocol = tcp - wait = no - user = root - group = root - groups = yes - server = /usr/bin/clustercheck - type = UNLISTED - per_source = UNLIMITED - log_on_success = - log_on_failure = HOST - flags = REUSE - } - -#. Start the ``xinetd`` daemon for ``clustercheck``. For servers - that use ``init``, run the following commands: - - .. code-block:: console - - # service xinetd enable - # service xinetd start - - For servers that use ``systemd``, run the following commands: - - .. code-block:: console - - # systemctl daemon-reload - # systemctl enable xinetd - # systemctl start xinetd diff --git a/doc/ha-guide/source/shared-database.rst b/doc/ha-guide/source/shared-database.rst deleted file mode 100644 index 6f99515d81..0000000000 --- a/doc/ha-guide/source/shared-database.rst +++ /dev/null @@ -1,32 +0,0 @@ -=============================================== -Database (Galera Cluster) for high availability -=============================================== - -.. toctree:: - :maxdepth: 2 - - shared-database-configure.rst - shared-database-manage.rst - -The first step is to install the database that sits at the heart of the -cluster. To implement high availability, run an instance of the database on -each controller node and use Galera Cluster to provide replication between -them. Galera Cluster is a synchronous multi-master database cluster, based -on MySQL and the InnoDB storage engine. It is a high-availability service -that provides high system uptime, no data loss, and scalability for growth. - -You can achieve high availability for the OpenStack database in many -different ways, depending on the type of database that you want to use. -There are three implementations of Galera Cluster available to you: - -- `Galera Cluster for MySQL `_: The MySQL - reference implementation from Codership, Oy. -- `MariaDB Galera Cluster `_: The MariaDB - implementation of Galera Cluster, which is commonly supported in - environments based on Red Hat distributions. -- `Percona XtraDB Cluster `_: The XtraDB - implementation of Galera Cluster from Percona. - -In addition to Galera Cluster, you can also achieve high availability -through other database options, such as PostgreSQL, which has its own -replication system. diff --git a/doc/ha-guide/source/shared-messaging.rst b/doc/ha-guide/source/shared-messaging.rst deleted file mode 100644 index 4daa882187..0000000000 --- a/doc/ha-guide/source/shared-messaging.rst +++ /dev/null @@ -1,285 +0,0 @@ -======================================= -Messaging service for high availability -======================================= - -An AMQP (Advanced Message Queuing Protocol) compliant message bus is -required for most OpenStack components in order to coordinate the -execution of jobs entered into the system. - -The most popular AMQP implementation used in OpenStack installations -is RabbitMQ. - -RabbitMQ nodes fail over on the application and the infrastructure layers. - -The application layer is controlled by the ``oslo.messaging`` -configuration options for multiple AMQP hosts. 
If the AMQP node fails, -the application reconnects to the next one configured within the -specified reconnect interval. The specified reconnect interval -constitutes its SLA. - -On the infrastructure layer, the SLA is the time for which RabbitMQ -cluster reassembles. Several cases are possible. The Mnesia keeper -node is the master of the corresponding Pacemaker resource for -RabbitMQ. When it fails, the result is a full AMQP cluster downtime -interval. Normally, its SLA is no more than several minutes. Failure -of another node that is a slave of the corresponding Pacemaker -resource for RabbitMQ results in no AMQP cluster downtime at all. - -Making the RabbitMQ service highly available involves the following steps: - -- :ref:`Install RabbitMQ` - -- :ref:`Configure RabbitMQ for HA queues` - -- :ref:`Configure OpenStack services to use RabbitMQ HA queues - ` - -.. note:: - - Access to RabbitMQ is not normally handled by HAProxy. Instead, - consumers must be supplied with the full list of hosts running - RabbitMQ with ``rabbit_hosts`` and turn on the ``rabbit_ha_queues`` - option. For more information, read the `core issue - `_. - For more detail, read the `history and solution - `_. - -.. _rabbitmq-install: - -Install RabbitMQ -~~~~~~~~~~~~~~~~ - -The commands for installing RabbitMQ are specific to the Linux distribution -you are using. - -For Ubuntu or Debian: - -.. code-block:: console - - # apt-get install rabbitmq-server - -For RHEL, Fedora, or CentOS: - -.. code-block:: console - - # yum install rabbitmq-server - -For openSUSE: - -.. code-block:: console - - # zypper install rabbitmq-server - -For SLES 12: - -.. code-block:: console - - # zypper addrepo -f obs://Cloud:OpenStack:Kilo/SLE_12 Kilo - [Verify the fingerprint of the imported GPG key. See below.] - # zypper install rabbitmq-server - -.. note:: - - For SLES 12, the packages are signed by GPG key 893A90DAD85F9316. - You should verify the fingerprint of the imported GPG key before using it. - - .. code-block:: none - - Key ID: 893A90DAD85F9316 - Key Name: Cloud:OpenStack OBS Project - Key Fingerprint: 35B34E18ABC1076D66D5A86B893A90DAD85F9316 - Key Created: Tue Oct 8 13:34:21 2013 - Key Expires: Thu Dec 17 13:34:21 2015 - -For more information, see the official installation manual for the -distribution: - -- `Debian and Ubuntu `_ -- `RPM based `_ - (RHEL, Fedora, CentOS, openSUSE) - -.. _rabbitmq-configure: - -Configure RabbitMQ for HA queues -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. [TODO: This section should begin with a brief mention -.. about what HA queues are and why they are valuable, etc] - -.. [TODO: replace "currently" with specific release names] - -.. [TODO: Does this list need to be updated? Perhaps we need a table -.. that shows each component and the earliest release that allows it -.. to work with HA queues.] - -The following components/services can work with HA queues: - -- OpenStack Compute -- OpenStack Block Storage -- OpenStack Networking -- Telemetry - -Consider that, while exchanges and bindings survive the loss of individual -nodes, queues and their messages do not because a queue and its contents -are located on one node. If we lose this node, we also lose the queue. - -Mirrored queues in RabbitMQ improve the availability of service since -it is resilient to failures. - -Production servers should run (at least) three RabbitMQ servers for testing -and demonstration purposes, however it is possible to run only two servers. -In this section, we configure two nodes, called ``rabbit1`` and ``rabbit2``. 
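For the ``rabbit_hosts`` and ``rabbit_ha_queues`` options mentioned in the note above, a hedged sketch of the corresponding consumer-side configuration (the ``[oslo_messaging_rabbit]`` section name and port ``5672`` are assumptions; ``rabbit1`` and ``rabbit2`` are the example nodes used in this section):

.. code-block:: ini

   [oslo_messaging_rabbit]
   # Give consumers the full list of RabbitMQ hosts rather than a load balancer
   rabbit_hosts = rabbit1:5672,rabbit2:5672
   # Use the mirrored (HA) queues configured on the cluster
   rabbit_ha_queues = true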
-To build a broker, ensure that all nodes have the same Erlang cookie file. - -.. [TODO: Should the example instead use a minimum of three nodes?] - -#. Stop RabbitMQ and copy the cookie from the first node to each of the - other node(s): - - .. code-block:: console - - # scp /var/lib/rabbitmq/.erlang.cookie root@NODE:/var/lib/rabbitmq/.erlang.cookie - -#. On each target node, verify the correct owner, - group, and permissions of the file :file:`erlang.cookie`: - - .. code-block:: console - - # chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie - # chmod 400 /var/lib/rabbitmq/.erlang.cookie - -#. Start the message queue service on all nodes and configure it to start - when the system boots. On Ubuntu, it is configured by default. - - On CentOS, RHEL, openSUSE, and SLES: - - .. code-block:: console - - # systemctl enable rabbitmq-server.service - # systemctl start rabbitmq-server.service - -#. Verify that the nodes are running: - - .. code-block:: console - - # rabbitmqctl cluster_status - Cluster status of node rabbit@NODE... - [{nodes,[{disc,[rabbit@NODE]}]}, - {running_nodes,[rabbit@NODE]}, - {partitions,[]}] - ...done. - -#. Run the following commands on each node except the first one: - - .. code-block:: console - - # rabbitmqctl stop_app - Stopping node rabbit@NODE... - ...done. - # rabbitmqctl join_cluster rabbit@rabbit1 - # rabbitmqctl start_app - Starting node rabbit@NODE ... - ...done. - -.. note:: - - The default node type is a disc node. In this guide, nodes - join the cluster as disc nodes. Also, nodes can join the cluster - as RAM nodes. For more details about this feature, check - `Clusters with RAM nodes `_. - -#. Verify the cluster status: - - .. code-block:: console - - # rabbitmqctl cluster_status - Cluster status of node rabbit@NODE... - [{nodes,[{disc,[rabbit@rabbit1]},{ram,[rabbit@NODE]}]}, \ - {running_nodes,[rabbit@NODE,rabbit@rabbit1]}] - - If the cluster is working, you can create usernames and passwords - for the queues. - -#. To ensure that all queues except those with auto-generated names - are mirrored across all running nodes, - set the ``ha-mode`` policy key to all - by running the following command on one of the nodes: - - .. code-block:: console - - # rabbitmqctl set_policy ha-all '^(?!amq\.).*' '{"ha-mode": "all"}' - -More information is available in the RabbitMQ documentation: - -- `Highly Available Queues `_ -- `Clustering Guide `_ - -.. note:: - - As another option to make RabbitMQ highly available, RabbitMQ contains the - OCF scripts for the Pacemaker cluster resource agents since version 3.5.7. - It provides the active/active RabbitMQ cluster with mirrored queues. - For more information, see `Auto-configuration of a cluster with - a Pacemaker `_. - -.. _rabbitmq-services: - -Configure OpenStack services to use Rabbit HA queues -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Configure the OpenStack components to use at least two RabbitMQ nodes. - -Use these steps to configurate all services using RabbitMQ: - -#. RabbitMQ HA cluster Transport URL using ``[user:pass@]host:port`` format: - - .. code-block:: ini - - transport_url = rabbit://RABBIT_USER:RABBIT_PASS@rabbit1:5672, - RABBIT_USER:RABBIT_PASS@rabbit2:5672 - - Replace ``RABBIT_USER`` with RabbitMQ username and ``RABBIT_PASS`` with - password for respective RabbitMQ host. For more information, see - `oslo messaging transport - `_. - -#. Retry connecting with RabbitMQ: - - .. code-block:: console - - rabbit_retry_interval=1 - -#. 
How long to back-off for between retries when connecting to RabbitMQ: - - .. code-block:: console - - rabbit_retry_backoff=2 - -#. Maximum retries with trying to connect to RabbitMQ (infinite by default): - - .. code-block:: console - - rabbit_max_retries=0 - -#. Use durable queues in RabbitMQ: - - .. code-block:: console - - rabbit_durable_queues=true - -#. Use HA queues in RabbitMQ (``x-ha-policy: all``): - - .. code-block:: console - - rabbit_ha_queues=true - -.. note:: - - If you change the configuration from an old set-up - that did not use HA queues, restart the service: - - .. code-block:: console - - # rabbitmqctl stop_app - # rabbitmqctl reset - # rabbitmqctl start_app diff --git a/doc/ha-guide/source/shared-services.rst b/doc/ha-guide/source/shared-services.rst deleted file mode 100644 index 933c807a0a..0000000000 --- a/doc/ha-guide/source/shared-services.rst +++ /dev/null @@ -1,12 +0,0 @@ -=============================== -Configuring the shared services -=============================== - -This chapter describes the shared services for high availability, -such as database, messaging service. - -.. toctree:: - :maxdepth: 2 - - shared-database.rst - shared-messaging.rst diff --git a/doc/ha-guide/source/storage-ha-backend.rst b/doc/ha-guide/source/storage-ha-backend.rst deleted file mode 100644 index f0e8786a2a..0000000000 --- a/doc/ha-guide/source/storage-ha-backend.rst +++ /dev/null @@ -1,59 +0,0 @@ - -.. _storage-ha-backend: - -================ -Storage back end -================ - -An OpenStack environment includes multiple data pools for the VMs: - -- Ephemeral storage is allocated for an instance and is deleted when the - instance is deleted. The Compute service manages ephemeral storage and - by default, Compute stores ephemeral drives as files on local disks on the - compute node. As an alternative, you can use Ceph RBD as the storage back - end for ephemeral storage. - -- Persistent storage exists outside all instances. Two types of persistent - storage are provided: - - - The Block Storage service (cinder) that can use LVM or Ceph RBD as the - storage back end. - - The Image service (glance) that can use the Object Storage service (swift) - or Ceph RBD as the storage back end. - -For more information about configuring storage back ends for -the different storage options, see `Manage volumes -`_ -in the OpenStack Administrator Guide. - -This section discusses ways to protect against data loss in your OpenStack -environment. - -RAID drives ------------ - -Configuring RAID on the hard drives that implement storage protects your data -against a hard drive failure. If the node itself fails, data may be lost. -In particular, all volumes stored on an LVM node can be lost. - -Ceph ----- - -`Ceph RBD `_ is an innately high availability storage back -end. It creates a storage cluster with multiple nodes that communicate with -each other to replicate and redistribute data dynamically. -A Ceph RBD storage cluster provides a single shared set of storage nodes that -can handle all classes of persistent and ephemeral data (glance, cinder, and -nova) that are required for OpenStack instances. - -Ceph RBD provides object replication capabilities by storing Block Storage -volumes as Ceph RBD objects. Ceph RBD ensures that each replica of an object -is stored on a different node. This means that your volumes are protected -against hard drive and node failures, or even the failure of the data center -itself. 
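The level of protection depends on the replication factor of each pool. As a quick
check, you can ask Ceph how many copies it keeps per pool. A minimal sketch,
assuming the commonly used pool names ``volumes``, ``images``, and ``vms``; adjust
these to the pools your deployment actually defines:

.. code-block:: console

   # ceph osd pool get volumes size
   # ceph osd pool get images size
   # ceph osd pool get vms size
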
- -When Ceph RBD is used for ephemeral volumes as well as block and image storage, -it supports `live migration -`_ -of VMs with ephemeral drives. LVM only supports live migration of -volume-backed VMs. diff --git a/doc/ha-guide/source/storage-ha-block.rst b/doc/ha-guide/source/storage-ha-block.rst deleted file mode 100644 index a667a7d942..0000000000 --- a/doc/ha-guide/source/storage-ha-block.rst +++ /dev/null @@ -1,189 +0,0 @@ -================================== -Highly available Block Storage API -================================== - -Cinder provides Block-Storage-as-a-Service suitable for performance -sensitive scenarios such as databases, expandable file systems, or -providing a server with access to raw block level storage. - -Persistent block storage can survive instance termination and can also -be moved across instances like any external storage device. Cinder -also has volume snapshots capability for backing up the volumes. - -Making the Block Storage API service highly available in -active/passive mode involves: - -- :ref:`ha-blockstorage-pacemaker` -- :ref:`ha-blockstorage-configure` -- :ref:`ha-blockstorage-services` - -In theory, you can run the Block Storage service as active/active. -However, because of sufficient concerns, we recommend running -the volume component as active/passive only. - -You can read more about these concerns on the -`Red Hat Bugzilla `_ -and there is a -`psuedo roadmap `_ -for addressing them upstream. - -.. _ha-blockstorage-pacemaker: - -Add Block Storage API resource to Pacemaker -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -On RHEL-based systems, create resources for cinder's systemd agents and create -constraints to enforce startup/shutdown ordering: - -.. code-block:: console - - pcs resource create openstack-cinder-api systemd:openstack-cinder-api --clone interleave=true - pcs resource create openstack-cinder-scheduler systemd:openstack-cinder-scheduler --clone interleave=true - pcs resource create openstack-cinder-volume systemd:openstack-cinder-volume - - pcs constraint order start openstack-cinder-api-clone then openstack-cinder-scheduler-clone - pcs constraint colocation add openstack-cinder-scheduler-clone with openstack-cinder-api-clone - pcs constraint order start openstack-cinder-scheduler-clone then openstack-cinder-volume - pcs constraint colocation add openstack-cinder-volume with openstack-cinder-scheduler-clone - - -If the Block Storage service runs on the same nodes as the other services, -then it is advisable to also include: - -.. code-block:: console - - pcs constraint order start openstack-keystone-clone then openstack-cinder-api-clone - -Alternatively, instead of using systemd agents, download and -install the OCF resource agent: - -.. code-block:: console - - # cd /usr/lib/ocf/resource.d/openstack - # wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/cinder-api - # chmod a+rx * - -You can now add the Pacemaker configuration for Block Storage API resource. -Connect to the Pacemaker cluster with the :command:`crm configure` command -and add the following cluster resources: - -.. code-block:: none - - primitive p_cinder-api ocf:openstack:cinder-api \ - params config="/etc/cinder/cinder.conf" \ - os_password="secretsecret" \ - os_username="admin" \ - os_tenant_name="admin" \ - keystone_get_token_url="http://10.0.0.11:5000/v2.0/tokens" \ - op monitor interval="30s" timeout="30s" - -This configuration creates ``p_cinder-api``, a resource for managing the -Block Storage API service. 
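Before going further, you can sanity-check the definition from the same shell. A
minimal sketch, assuming the ``crmsh`` tooling used above; note that ``crm_mon``
only reports the resource once you commit the configuration, as described next:

.. code-block:: console

   # crm configure show p_cinder-api
   # crm configure verify
   # crm_mon -1
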
- -The command :command:`crm configure` supports batch input, copy and paste the -lines above into your live Pacemaker configuration and then make changes as -required. For example, you may enter ``edit p_ip_cinder-api`` from the -:command:`crm configure` menu and edit the resource to match your preferred -virtual IP address. - -Once completed, commit your configuration changes by entering :command:`commit` -from the :command:`crm configure` menu. Pacemaker then starts the Block Storage -API service and its dependent resources on one of your nodes. - -.. _ha-blockstorage-configure: - -Configure Block Storage API service -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Edit the ``/etc/cinder/cinder.conf`` file. For example, on a RHEL-based system: - -.. code-block:: ini - :linenos: - - [DEFAULT] - # This is the name which we should advertise ourselves as and for - # A/P installations it should be the same everywhere - host = cinder-cluster-1 - - # Listen on the Block Storage VIP - osapi_volume_listen = 10.0.0.11 - - auth_strategy = keystone - control_exchange = cinder - - volume_driver = cinder.volume.drivers.nfs.NfsDriver - nfs_shares_config = /etc/cinder/nfs_exports - nfs_sparsed_volumes = true - nfs_mount_options = v3 - - [database] - connection = mysql+pymysql://cinder:CINDER_DBPASS@10.0.0.11/cinder - max_retries = -1 - - [keystone_authtoken] - # 10.0.0.11 is the Keystone VIP - identity_uri = http://10.0.0.11:5000/ - www_authenticate_uri = http://10.0.0.11:5000/ - admin_tenant_name = service - admin_user = cinder - admin_password = CINDER_PASS - - [oslo_messaging_rabbit] - # Explicitly list the rabbit hosts as it doesn't play well with HAProxy - rabbit_hosts = 10.0.0.12,10.0.0.13,10.0.0.14 - # As a consequence, we also need HA queues - rabbit_ha_queues = True - heartbeat_timeout_threshold = 60 - heartbeat_rate = 2 - -Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage -database. Replace ``CINDER_PASS`` with the password you chose for the -``cinder`` user in the Identity service. - -This example assumes that you are using NFS for the physical storage, which -will almost never be true in a production installation. - -If you are using the Block Storage service OCF agent, some settings will -be filled in for you, resulting in a shorter configuration file: - -.. code-block:: ini - :linenos: - - # We have to use MySQL connection to store data: - connection = mysql+pymysql://cinder:CINDER_DBPASS@10.0.0.11/cinder - # Alternatively, you can switch to pymysql, - # a new Python 3 compatible library and use - # connection = mysql+pymysql://cinder:CINDER_DBPASS@10.0.0.11/cinder - # and be ready when everything moves to Python 3. - # Ref: https://wiki.openstack.org/wiki/PyMySQL_evaluation - - # We bind Block Storage API to the VIP: - osapi_volume_listen = 10.0.0.11 - - # We send notifications to High Available RabbitMQ: - notifier_strategy = rabbit - rabbit_host = 10.0.0.11 - -Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage -database. - -.. _ha-blockstorage-services: - -Configure OpenStack services to use the highly available Block Storage API -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Your OpenStack services must now point their Block Storage API configuration -to the highly available, virtual cluster IP address rather than a Block Storage -API server’s physical IP address as you would for a non-HA environment. - -Create the Block Storage API endpoint with this IP. 
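If a single virtual IP serves all traffic, register that one address for every
interface. A minimal sketch, assuming the VIP ``10.0.0.11`` and the same ``volume``
service type and v1 URL format used in the example below; newer deployments
typically register ``volumev2`` or ``volumev3`` endpoints instead:

.. code-block:: console

   $ openstack endpoint create --region $KEYSTONE_REGION \
     volume public 'http://10.0.0.11:8776/v1/%(tenant_id)s'

   $ openstack endpoint create --region $KEYSTONE_REGION \
     volume internal 'http://10.0.0.11:8776/v1/%(tenant_id)s'

   $ openstack endpoint create --region $KEYSTONE_REGION \
     volume admin 'http://10.0.0.11:8776/v1/%(tenant_id)s'
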
- -If you are using both private and public IP addresses, create two virtual IPs -and define your endpoint. For example: - -.. code-block:: console - - $ openstack endpoint create volume --region $KEYSTONE_REGION \ - --publicurl 'http://PUBLIC_VIP:8776/v1/%(tenant_id)s' \ - --adminurl 'http://10.0.0.11:8776/v1/%(tenant_id)s' \ - --internalurl 'http://10.0.0.11:8776/v1/%(tenant_id)s' diff --git a/doc/ha-guide/source/storage-ha-file-systems.rst b/doc/ha-guide/source/storage-ha-file-systems.rst deleted file mode 100644 index 11e235812b..0000000000 --- a/doc/ha-guide/source/storage-ha-file-systems.rst +++ /dev/null @@ -1,113 +0,0 @@ -======================================== -Highly available Shared File Systems API -======================================== - -Making the Shared File Systems (manila) API service highly available -in active/passive mode involves: - -- :ref:`ha-sharedfilesystems-pacemaker` -- :ref:`ha-sharedfilesystems-configure` -- :ref:`ha-sharedfilesystems-services` - -.. _ha-sharedfilesystems-pacemaker: - -Add Shared File Systems API resource to Pacemaker -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Download the resource agent to your system: - - .. code-block:: console - - # cd /usr/lib/ocf/resource.d/openstack - # wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/manila-api - # chmod a+rx * - -#. Add the Pacemaker configuration for the Shared File Systems - API resource. Connect to the Pacemaker cluster with the following - command: - - .. code-block:: console - - # crm configure - - .. note:: - - The :command:`crm configure` supports batch input. Copy and paste - the lines in the next step into your live Pacemaker configuration and then - make changes as required. - - For example, you may enter ``edit p_ip_manila-api`` from the - :command:`crm configure` menu and edit the resource to match your preferred - virtual IP address. - -#. Add the following cluster resources: - - .. code-block:: none - - primitive p_manila-api ocf:openstack:manila-api \ - params config="/etc/manila/manila.conf" \ - os_password="secretsecret" \ - os_username="admin" \ - os_tenant_name="admin" \ - keystone_get_token_url="http://10.0.0.11:5000/v2.0/tokens" \ - op monitor interval="30s" timeout="30s" - - This configuration creates ``p_manila-api``, a resource for managing the - Shared File Systems API service. - -#. Commit your configuration changes by entering the following command - from the :command:`crm configure` menu: - - .. code-block:: console - - # commit - -Pacemaker now starts the Shared File Systems API service and its -dependent resources on one of your nodes. - -.. _ha-sharedfilesystems-configure: - -Configure Shared File Systems API service -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Edit the :file:`/etc/manila/manila.conf` file: - -.. code-block:: ini - :linenos: - - # We have to use MySQL connection to store data: - sql_connection = mysql+pymysql://manila:password@10.0.0.11/manila?charset=utf8 - - # We bind Shared File Systems API to the VIP: - osapi_volume_listen = 10.0.0.11 - - # We send notifications to High Available RabbitMQ: - notifier_strategy = rabbit - rabbit_host = 10.0.0.11 - - -.. 
_ha-sharedfilesystems-services: - -Configure OpenStack services to use HA Shared File Systems API -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Your OpenStack services must now point their Shared File Systems API -configuration to the highly available, virtual cluster IP address rather than -a Shared File Systems API server’s physical IP address as you would -for a non-HA environment. - -You must create the Shared File Systems API endpoint with this IP. - -If you are using both private and public IP addresses, you should create two -virtual IPs and define your endpoints like this: - -.. code-block:: console - - $ openstack endpoint create --region RegionOne \ - sharev2 public 'http://PUBLIC_VIP:8786/v2/%(tenant_id)s' - - $ openstack endpoint create --region RegionOne \ - sharev2 internal 'http://10.0.0.11:8786/v2/%(tenant_id)s' - - $ openstack endpoint create --region RegionOne \ - sharev2 admin 'http://10.0.0.11:8786/v2/%(tenant_id)s' diff --git a/doc/ha-guide/source/storage-ha-image.rst b/doc/ha-guide/source/storage-ha-image.rst deleted file mode 100644 index 9081a5ebeb..0000000000 --- a/doc/ha-guide/source/storage-ha-image.rst +++ /dev/null @@ -1,141 +0,0 @@ -========================== -Highly available Image API -========================== - -The OpenStack Image service offers a service for discovering, registering, and -retrieving virtual machine images. To make the OpenStack Image API service -highly available in active/passive mode, you must: - -- :ref:`glance-api-pacemaker` -- :ref:`glance-api-configure` -- :ref:`glance-services` - -Prerequisites -~~~~~~~~~~~~~ - -Before beginning, ensure that you are familiar with the -documentation for installing the OpenStack Image API service. -See the *Image service* section in the -`Installation Guides `_, -depending on your distribution. - -.. _glance-api-pacemaker: - -Add OpenStack Image API resource to Pacemaker -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Download the resource agent to your system: - - .. code-block:: console - - # cd /usr/lib/ocf/resource.d/openstack - # wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/glance-api - # chmod a+rx * - -#. Add the Pacemaker configuration for the OpenStack Image API resource. - Use the following command to connect to the Pacemaker cluster: - - .. code-block:: console - - crm configure - - .. note:: - - The :command:`crm configure` command supports batch input. Copy and paste - the lines in the next step into your live Pacemaker configuration and - then make changes as required. - - For example, you may enter ``edit p_ip_glance-api`` from the - :command:`crm configure` menu and edit the resource to match your - preferred virtual IP address. - -#. Add the following cluster resources: - - .. code-block:: console - - primitive p_glance-api ocf:openstack:glance-api \ - params config="/etc/glance/glance-api.conf" \ - os_password="secretsecret" \ - os_username="admin" os_tenant_name="admin" \ - os_auth_url="http://10.0.0.11:5000/v2.0/" \ - op monitor interval="30s" timeout="30s" - - This configuration creates ``p_glance-api``, a resource for managing the - OpenStack Image API service. - -#. Commit your configuration changes by entering the following command from - the :command:`crm configure` menu: - - .. code-block:: console - - commit - -Pacemaker then starts the OpenStack Image API service and its dependent -resources on one of your nodes. - -.. 
_glance-api-configure: - -Configure OpenStack Image service API -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Edit the :file:`/etc/glance/glance-api.conf` file -to configure the OpenStack Image service: - -.. code-block:: ini - - # We have to use MySQL connection to store data: - sql_connection=mysql://glance:password@10.0.0.11/glance - # Alternatively, you can switch to pymysql, - # a new Python 3 compatible library and use - # sql_connection=mysql+pymysql://glance:password@10.0.0.11/glance - # and be ready when everything moves to Python 3. - # Ref: https://wiki.openstack.org/wiki/PyMySQL_evaluation - - # We bind OpenStack Image API to the VIP: - bind_host = 10.0.0.11 - - # Connect to OpenStack Image registry service: - registry_host = 10.0.0.11 - - # We send notifications to High Available RabbitMQ: - notifier_strategy = rabbit - rabbit_host = 10.0.0.11 - -[TODO: need more discussion of these parameters] - -.. _glance-services: - -Configure OpenStack services to use the highly available OpenStack Image API -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Your OpenStack services must now point their OpenStack Image API configuration -to the highly available, virtual cluster IP address instead of pointing to the -physical IP address of an OpenStack Image API server as you would in a non-HA -cluster. - -For example, if your OpenStack Image API service IP address is 10.0.0.11 -(as in the configuration explained here), you would use the following -configuration in your :file:`nova.conf` file: - -.. code-block:: ini - - [glance] - # ... - api_servers = 10.0.0.11 - # ... - - -You must also create the OpenStack Image API endpoint with this IP address. -If you are using both private and public IP addresses, create two virtual IP -addresses and define your endpoint. For example: - -.. code-block:: console - - $ openstack endpoint create --region $KEYSTONE_REGION \ - image public http://PUBLIC_VIP:9292 - - $ openstack endpoint create --region $KEYSTONE_REGION \ - image admin http://10.0.0.11:9292 - - $ openstack endpoint create --region $KEYSTONE_REGION \ - image internal http://10.0.0.11:9292 diff --git a/doc/ha-guide/source/storage-ha.rst b/doc/ha-guide/source/storage-ha.rst deleted file mode 100644 index 583b214ef9..0000000000 --- a/doc/ha-guide/source/storage-ha.rst +++ /dev/null @@ -1,20 +0,0 @@ -=================== -Configuring storage -=================== - -.. 
toctree:: - :maxdepth: 2 - - storage-ha-image.rst - storage-ha-block.rst - storage-ha-file-systems.rst - storage-ha-backend.rst - -Making the Block Storage (cinder) API service highly available in -active/active mode involves: - -* Configuring Block Storage to listen on the VIP address - -* Managing the Block Storage API daemon with the Pacemaker cluster manager - -* Configuring OpenStack services to use this IP address diff --git a/tools/build-all-rst.sh b/tools/build-all-rst.sh index a61038649c..9cdc47668b 100755 --- a/tools/build-all-rst.sh +++ b/tools/build-all-rst.sh @@ -28,12 +28,11 @@ done # PDF targets for Install guides are dealt in build-install-guides-rst.sh PDF_TARGETS=( 'arch-design'\ - 'ha-guide' \ 'image-guide' \ 'install-guide') for guide in arch-design doc-contrib-guide glossary \ - ha-guide ha-guide-draft image-guide install-guide; do + ha-guide-draft image-guide install-guide; do if [[ ${PDF_TARGETS[*]} =~ $guide ]]; then tools/build-rst.sh doc/$guide --build build \ --target $guide $LINKCHECK $PDF_OPTION diff --git a/www/ja/index.html b/www/ja/index.html index d95f5fa0e9..b059ef0edc 100644 --- a/www/ja/index.html +++ b/www/ja/index.html @@ -58,8 +58,6 @@ ネットワークガイド (Mitaka 版) ネットワークガイド (Liberty 版)

OpenStack Networking (neutron) のデプロイと管理

- 高可用性ガイド -

高可用性 OpenStack のためのインストールと設定方法

セキュリティーガイド

より安全な OpenStack クラウド構築のためのガイドラインとシナリオ

仮想マシンイメージガイド diff --git a/www/ocata/index.html b/www/ocata/index.html index 7085e64079..a23070815d 100644 --- a/www/ocata/index.html +++ b/www/ocata/index.html @@ -30,7 +30,7 @@

Operations and Administration Guides

Administrator Guides

Manage and troubleshoot an OpenStack cloud

- High Availability Guide[PDF] + High Availability Guide

Install and configure OpenStack for high availability

Security Guide

Guidelines and scenarios for creating more secure OpenStack clouds

diff --git a/www/pike/index.html b/www/pike/index.html index a362cdc6e6..c0b6780a4e 100644 --- a/www/pike/index.html +++ b/www/pike/index.html @@ -30,7 +30,7 @@

Operations and Administration Guides

Administrator Guides

Manage and troubleshoot an OpenStack cloud

- High Availability Guide[PDF] + High Availability Guide

Install and configure OpenStack for high availability

Architecture Guide

Plan and design an OpenStack cloud

diff --git a/www/queens/index.html b/www/queens/index.html index a362cdc6e6..c0b6780a4e 100644 --- a/www/queens/index.html +++ b/www/queens/index.html @@ -30,7 +30,7 @@

Operations and Administration Guides

Administrator Guides

Manage and troubleshoot an OpenStack cloud

- High Availability Guide[PDF] + High Availability Guide

Install and configure OpenStack for high availability

Architecture Guide

Plan and design an OpenStack cloud

diff --git a/www/rocky/index.html b/www/rocky/index.html index 913666de7f..cd2bb98689 100644 --- a/www/rocky/index.html +++ b/www/rocky/index.html @@ -30,7 +30,7 @@

Operations and Administration Guides

Administrator Guides

Manage and troubleshoot an OpenStack cloud

- High Availability Guide[PDF] + High Availability Guide

Install and configure OpenStack for high availability

Architecture Guide

Plan and design an OpenStack cloud

diff --git a/www/static/sitemap.xml b/www/static/sitemap.xml index 9b08e628da..7d1d911cd9 100644 --- a/www/static/sitemap.xml +++ b/www/static/sitemap.xml @@ -41,12 +41,6 @@ 2018-09-08T19:51:33+0000 daily - - 1.0 - https://docs.openstack.org/ha-guide/ - 2018-09-08T19:49:28+0000 - daily - 1.0 https://docs.openstack.org/upstream-training/ @@ -1013,12 +1007,6 @@ 2018-08-15T13:18:23+0000 weekly - - 1.0 - https://docs.openstack.org/ha-guide/HAGuide.pdf - 2018-09-08T19:49:42+0000 - daily - 1.0 https://docs.openstack.org/image-guide/ImageGuide.pdf @@ -90725,12 +90713,6 @@ 2018-08-30T17:22:12+0000 weekly - - 1.0 - https://docs.openstack.org/ha-guide/index.html - 2018-09-08T19:49:28+0000 - daily - 1.0 https://docs.openstack.org/congress/queens/admin/ha-deployment.html @@ -298979,12 +298961,6 @@ 2018-08-21T00:42:10+0000 weekly - - 1.0 - https://docs.openstack.org/ja/ha-guide/ - 2018-09-08T19:53:25+0000 - daily - 1.0 https://docs.openstack.org/newton/ja/install-guide-ubuntu/ @@ -301361,90 +301337,6 @@ 2017-10-11T08:24:53+0000 weekly - - 1.0 - https://docs.openstack.org/ja/ha-guide/common/glossary.html - 2018-09-08T19:53:24+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/common/app-support.html - 2018-09-08T19:53:22+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/appendix.html - 2018-09-08T19:53:22+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/compute-node-ha.html - 2018-09-08T19:53:24+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/storage-ha-backend.html - 2018-09-08T19:53:26+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/storage-ha-file-systems.html - 2018-09-08T19:53:26+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/storage-ha-block.html - 2018-09-08T19:53:26+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/storage-ha-image.html - 2018-09-08T19:53:26+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/storage-ha.html - 2018-09-08T19:53:26+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/networking-ha-l3.html - 2018-09-08T19:53:25+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/networking-ha-dhcp.html - 2018-09-08T19:53:25+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/networking-ha.html - 2018-09-08T19:53:25+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/controller-ha-telemetry.html - 2018-09-08T19:53:25+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/controller-ha-identity.html - 2018-09-08T19:53:24+0000 - daily - 1.0 https://docs.openstack.org/ja/user-guide/cli-access-instance-through-a-console.html @@ -301607,108 +301499,12 @@ 2017-07-18T09:01:39+0000 daily - - 1.0 - https://docs.openstack.org/ja/ha-guide/controller-ha-memcached.html - 2018-09-08T19:53:24+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/controller-ha-haproxy.html - 2018-09-08T19:53:24+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/controller-ha-vip.html - 2018-09-08T19:53:25+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/controller-ha-pacemaker.html - 2018-09-08T19:53:25+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/intro-ha-arch-pacemaker.html - 2018-09-08T19:53:25+0000 - daily - 1.0 https://docs.openstack.org/newton/ja/install-guide-ubuntu/common/get-started-conceptual-architecture.html 2017-10-11T08:24:53+0000 weekly - - 1.0 - https://docs.openstack.org/ja/ha-guide/controller-ha.html - 2018-09-08T19:53:24+0000 - daily - - - 1.0 - 
https://docs.openstack.org/ja/ha-guide/shared-messaging.html - 2018-09-08T19:53:26+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/shared-database.html - 2018-09-08T19:53:25+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/shared-services.html - 2018-09-08T19:53:26+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/environment-memcached.html - 2018-09-08T19:53:25+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/environment-ntp.html - 2018-09-08T19:53:25+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/environment-operatingsystem.html - 2018-09-08T19:53:25+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/environment-hardware.html - 2018-09-08T19:53:25+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/environment.html - 2018-09-08T19:53:25+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/intro-ha.html - 2018-09-08T19:53:25+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/common/conventions.html - 2018-09-08T19:53:22+0000 - daily - 1.0 https://docs.openstack.org/ja/security-guide/common/glossary.html @@ -301829,12 +301625,6 @@ 2018-09-01T07:59:05+0000 daily - - 1.0 - https://docs.openstack.org/ja/ha-guide/index.html - 2018-09-08T19:53:25+0000 - daily - 1.0 https://docs.openstack.org/ja/ops-guide/ops-user-facing-operations.html @@ -302081,18 +301871,6 @@ 2017-07-19T10:36:22+0000 daily - - 1.0 - https://docs.openstack.org/ja/ha-guide/shared-database-configure.html - 2018-09-08T19:53:25+0000 - daily - - - 1.0 - https://docs.openstack.org/ja/ha-guide/shared-database-manage.html - 2018-09-08T19:53:25+0000 - daily - 1.0 https://docs.openstack.org/ja/ops-guide/ops-maintenance-controller.html @@ -344987,120 +344765,6 @@ 2018-09-04T06:41:21+0000 daily - - 1.0 - https://docs.openstack.org/ha-guide/networking-ha-dhcp.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/controller-ha-memcached.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/controller-ha-identity.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/networking-ha-l3.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/controller-ha-pacemaker.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/controller-ha-vip.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/compute-node-ha.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/storage-ha-file-systems.html - 2018-09-08T19:49:29+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/controller-ha-haproxy.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/common/app-support.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/intro-ha-arch-pacemaker.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/storage-ha-backend.html - 2018-09-08T19:49:29+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/controller-ha-telemetry.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/storage-ha.html - 2018-09-08T19:49:29+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/appendix.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/storage-ha-block.html - 2018-09-08T19:49:29+0000 - daily - - - 1.0 - 
https://docs.openstack.org/ha-guide/storage-ha-image.html - 2018-09-08T19:49:29+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/networking-ha.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/common/glossary.html - 2018-09-08T19:49:28+0000 - daily - 1.0 https://docs.openstack.org/ko_KR/install-guide/ @@ -345149,78 +344813,12 @@ 2018-08-28T17:50:00+0000 daily - - 1.0 - https://docs.openstack.org/ha-guide/shared-database.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/environment-operatingsystem.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/environment.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/common/conventions.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/shared-messaging.html - 2018-09-08T19:49:29+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/environment-memcached.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/intro-ha.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/environment-ntp.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/shared-services.html - 2018-09-08T19:49:29+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/controller-ha.html - 2018-09-08T19:49:28+0000 - daily - 1.0 https://docs.openstack.org/infra/manual/drivers.html 2018-08-28T17:50:01+0000 daily - - 1.0 - https://docs.openstack.org/ha-guide/environment-hardware.html - 2018-09-08T19:49:28+0000 - daily - 1.0 https://docs.openstack.org/infra/manual/sandbox.html @@ -345353,18 +344951,6 @@ 2015-06-12T20:49:17+0000 daily - - 1.0 - https://docs.openstack.org/ha-guide/shared-database-manage.html - 2018-09-08T19:49:28+0000 - daily - - - 1.0 - https://docs.openstack.org/ha-guide/shared-database-configure.html - 2018-09-08T19:49:28+0000 - daily - 1.0 https://docs.openstack.org/ko_KR/install-guide/firewalls-default-ports.html diff --git a/www/stein/index.html b/www/stein/index.html index 913666de7f..cd2bb98689 100644 --- a/www/stein/index.html +++ b/www/stein/index.html @@ -30,7 +30,7 @@

Operations and Administration Guides

Administrator Guides

Manage and troubleshoot an OpenStack cloud

- High Availability Guide[PDF] + High Availability Guide

Install and configure OpenStack for high availability

Architecture Guide

Plan and design an OpenStack cloud

diff --git a/www/templates/ops_and_admin_guides.tmpl b/www/templates/ops_and_admin_guides.tmpl index 047b9030d0..3a6221d12b 100644 --- a/www/templates/ops_and_admin_guides.tmpl +++ b/www/templates/ops_and_admin_guides.tmpl @@ -1,7 +1,7 @@

Operations and Administration Guides

Administrator Guides

Manage and troubleshoot an OpenStack cloud

- High Availability Guide[PDF] + High Availability Guide

Install and configure OpenStack for high availability

Security Guide

Guidelines and scenarios for creating more secure OpenStack clouds