diff --git a/doc/common/app-support.rst b/doc/common/app-support.rst
new file mode 100644
index 00000000..3c8f960a
--- /dev/null
+++ b/doc/common/app-support.rst
@@ -0,0 +1,230 @@
+.. ## WARNING ##########################################################
+.. This file is synced from the openstack/openstack-manuals repository to
+.. other related repositories. If you need to make changes to this file,
+.. make the changes in openstack-manuals. After any change is merged to
+.. openstack-manuals, a patch is automatically proposed to the other repositories.
+.. #####################################################################
+
+=================
+Community support
+=================
+
+The following resources are available to help you run and use OpenStack.
+The OpenStack community constantly improves and adds to the main
+features of OpenStack, but if you have any questions, do not hesitate to
+ask. Use the following resources to get OpenStack support and
+troubleshoot your installations.
+
+Documentation
+~~~~~~~~~~~~~
+
+For the available OpenStack documentation, see
+`docs.openstack.org <https://docs.openstack.org>`_.
+
+The following guides explain how to install a Proof-of-Concept OpenStack cloud
+and its associated components:
+
+* `Rocky Installation Guides `_
+
+The following books explain how to configure and run an OpenStack cloud:
+
+* `Architecture Design Guide `_
+
+* `Rocky Administrator Guides `_
+
+* `Rocky Configuration Guides `_
+
+* `Rocky Networking Guide `_
+
+* `High Availability Guide `_
+
+* `Security Guide `_
+
+* `Virtual Machine Image Guide `_
+
+The following book explains how to use the OpenStack API bindings:
+
+* `Rocky API Bindings
+  `_
+
+The following documentation provides reference and guidance information
+for the OpenStack APIs:
+
+* `API Documentation `_
+
+The following guide explains how to contribute to OpenStack
+documentation:
+
+* `Documentation Contributor Guide `_
+
+ask.openstack.org
+~~~~~~~~~~~~~~~~~
+
+During setup or testing of OpenStack, you might have questions
+about how a specific task is completed or be in a situation where a
+feature does not work correctly. Use the
+`ask.openstack.org <https://ask.openstack.org>`_ site to ask questions
+and get answers. When you visit the `Ask OpenStack
+`_ site, scan
+the recently asked questions to see whether your question has already
+been answered. If not, ask a new question. Be sure to give a clear,
+concise summary in the title and provide as much detail as possible in
+the description. Paste in your command output or stack traces, links to
+screen shots, and any other information that might be useful.
+
+The OpenStack wiki
+~~~~~~~~~~~~~~~~~~
+
+The `OpenStack wiki `_ contains a broad
+range of topics, but some of the information can be difficult to find or
+is a few pages deep. Fortunately, the wiki search feature enables you to
+search by title or content. If you search for specific information, such
+as networking or OpenStack Compute, you can find a large amount
+of relevant material. More is being added all the time, so be sure to
+check back often. You can find the search box in the upper-right corner
+of any OpenStack wiki page.
+
+The Launchpad bugs area
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The OpenStack community values your setup and testing efforts and wants
+your feedback. To log a bug, you must `sign up for a Launchpad account
+`_. You can view existing bugs and report bugs
+in the Launchpad Bugs area. Use the search feature to determine whether
+the bug has already been reported or fixed.
+If it still seems like your bug is unreported, fill out a bug report.
+
+Some tips:
+
+* Give a clear, concise summary.
+
+* Provide as much detail as possible in the description. Paste in your
+  command output or stack traces, links to screen shots, and any other
+  information that might be useful.
+
+* Be sure to include the software and package versions that you are
+  using, especially if you are using a development branch, for example,
+  ``"Kilo release" vs git commit bc79c3ecc55929bac585d04a03475b72e06a3208``.
+
+* Any deployment-specific information is helpful, such as whether you
+  are using Ubuntu 14.04 or are performing a multi-node installation.
+
+The following Launchpad Bugs areas are available:
+
+* `Bugs: OpenStack Block Storage
+  (cinder) `_
+
+* `Bugs: OpenStack Compute (nova) `_
+
+* `Bugs: OpenStack Dashboard
+  (horizon) `_
+
+* `Bugs: OpenStack Identity
+  (keystone) `_
+
+* `Bugs: OpenStack Image service
+  (glance) `_
+
+* `Bugs: OpenStack Networking
+  (neutron) `_
+
+* `Bugs: OpenStack Object Storage
+  (swift) `_
+
+* `Bugs: Application catalog (murano) `_
+
+* `Bugs: Bare metal service (ironic) `_
+
+* `Bugs: Clustering service (senlin) `_
+
+* `Bugs: Container Infrastructure Management service (magnum) `_
+
+* `Bugs: Data processing service
+  (sahara) `_
+
+* `Bugs: Database service (trove) `_
+
+* `Bugs: DNS service (designate) `_
+
+* `Bugs: Key Manager Service (barbican) `_
+
+* `Bugs: Monitoring (monasca) `_
+
+* `Bugs: Orchestration (heat) `_
+
+* `Bugs: Rating (cloudkitty) `_
+
+* `Bugs: Shared file systems (manila) `_
+
+* `Bugs: Telemetry
+  (ceilometer) `_
+
+* `Bugs: Telemetry v3
+  (gnocchi) `_
+
+* `Bugs: Workflow service
+  (mistral) `_
+
+* `Bugs: Messaging service
+  (zaqar) `_
+
+* `Bugs: Container service
+  (zun) `_
+
+* `Bugs: OpenStack API Documentation
+  (developer.openstack.org) `_
+
+* `Bugs: OpenStack Documentation
+  (docs.openstack.org) `_
+
+Documentation feedback
+~~~~~~~~~~~~~~~~~~~~~~
+
+To provide feedback on documentation, join our IRC channel ``#openstack-doc``
+on the Freenode IRC network, or `report a bug in Launchpad
+`_ and choose the particular
+project that the documentation is a part of.
+
+The OpenStack IRC channel
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The OpenStack community lives in the #openstack IRC channel on the
+Freenode network. You can hang out, ask questions, or get immediate
+feedback for urgent and pressing issues. To install an IRC client or use
+a browser-based client, go to
+`https://webchat.freenode.net/ <https://webchat.freenode.net/>`_. You can
+also use `Colloquy `_ (Mac OS X),
+`mIRC `_ (Windows),
+or XChat (Linux). When you are in the IRC channel
+and want to share code or command output, the generally accepted method
+is to use a Paste Bin. The OpenStack project has one at `Paste
+`_. Paste longer text or
+logs in the web form, and you get a URL that you can paste into the
+channel. The OpenStack IRC channel is ``#openstack`` on
+``irc.freenode.net``. You can find a list of all OpenStack IRC channels on
+the `IRC page on the wiki `_.
+
+OpenStack mailing lists
+~~~~~~~~~~~~~~~~~~~~~~~
+
+A great way to get answers and insights is to post your question or
+problematic scenario to the OpenStack mailing list. You can learn from
+and help others who might have similar issues. To subscribe or view the
+archives, go to the `general OpenStack mailing list
+`_. If you are
+interested in the other mailing lists for specific projects or development,
+refer to `Mailing Lists `_.
+
+OpenStack distribution packages
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following Linux distributions provide community-supported packages
+for OpenStack:
+
+* **CentOS, Fedora, and Red Hat Enterprise Linux:**
+  https://www.rdoproject.org/
+
+* **openSUSE and SUSE Linux Enterprise Server:**
+  https://en.opensuse.org/Portal:OpenStack
+
+* **Ubuntu:** https://wiki.ubuntu.com/OpenStack/CloudArchive
diff --git a/doc/common/appendix.rst b/doc/common/appendix.rst
new file mode 100644
index 00000000..1d7d8143
--- /dev/null
+++ b/doc/common/appendix.rst
@@ -0,0 +1,8 @@
+Appendix
+~~~~~~~~
+
+.. toctree::
+   :maxdepth: 1
+
+   app-support.rst
+   glossary.rst
diff --git a/doc/common/conventions.rst b/doc/common/conventions.rst
new file mode 100644
index 00000000..b3cbabb2
--- /dev/null
+++ b/doc/common/conventions.rst
@@ -0,0 +1,47 @@
+.. ## WARNING ##########################################################
+.. This file is synced from the openstack/openstack-manuals repository to
+.. other related repositories. If you need to make changes to this file,
+.. make the changes in openstack-manuals. After any change is merged to
+.. openstack-manuals, a patch is automatically proposed to the other repositories.
+.. #####################################################################
+
+===========
+Conventions
+===========
+
+The OpenStack documentation uses several typesetting conventions.
+
+Notices
+~~~~~~~
+
+Notices take these forms:
+
+.. note:: A comment with additional information that explains a part of the
+   text.
+
+.. important:: Something you must be aware of before proceeding.
+
+.. tip:: An extra but helpful piece of practical advice.
+
+.. caution:: Helpful information that prevents the user from making mistakes.
+
+.. warning:: Critical information about the risk of data loss or security
+   issues.
+
+Command prompts
+~~~~~~~~~~~~~~~
+
+.. code-block:: console
+
+   $ command
+
+Any user, including the ``root`` user, can run commands that are
+prefixed with the ``$`` prompt.
+
+.. code-block:: console
+
+   # command
+
+The ``root`` user must run commands that are prefixed with the ``#``
+prompt. You can also prefix these commands with the :command:`sudo`
+command, if available, to run them.
diff --git a/doc/common/glossary.rst b/doc/common/glossary.rst
new file mode 100644
index 00000000..404e0d67
--- /dev/null
+++ b/doc/common/glossary.rst
@@ -0,0 +1,4164 @@
+.. _glossary:
+
+========
+Glossary
+========
+
+This glossary offers a list of terms and definitions to define a
+vocabulary for OpenStack-related concepts.
+
+To add to the OpenStack glossary, clone the `openstack/openstack-manuals
+repository
+`__ and
+update the source file ``doc/common/glossary.rst`` through the
+OpenStack contribution process.
+
+0-9
+~~~
+
+.. glossary::
+
+   6to4
+
+     A mechanism that allows IPv6 packets to be transmitted
+     over an IPv4 network, providing a strategy for migrating to
+     IPv6.
+
+A
+~
+
+.. glossary::
+
+   absolute limit
+
+     Impassable limits for guest VMs. Settings include total RAM
+     size, maximum number of vCPUs, and maximum disk size.
+
+   access control list (ACL)
+
+     A list of permissions attached to an object. An ACL specifies
+     which users or system processes have access to objects. It also
+     defines which operations can be performed on specified objects. Each
+     entry in a typical ACL specifies a subject and an operation. For
+     instance, the ACL entry ``(Alice, delete)`` for a file gives
+     Alice permission to delete the file.
+
+   access key
+
+     Alternative term for an Amazon EC2 access key. See EC2 access
+     key.
+
+   account
+
+     The Object Storage context of an account. Do not confuse with a
+     user account from an authentication service, such as Active Directory,
+     /etc/passwd, OpenLDAP, OpenStack Identity, and so on.
+
+   account auditor
+
+     Checks for missing replicas and incorrect or corrupted objects
+     in a specified Object Storage account by running queries against the
+     back-end SQLite database.
+
+   account database
+
+     A SQLite database that contains Object Storage accounts and
+     related metadata and that the account server accesses.
+
+   account reaper
+
+     An Object Storage worker that scans for and deletes account
+     databases that the account server has marked for deletion.
+
+   account server
+
+     Lists containers in Object Storage and stores container
+     information in the account database.
+
+   account service
+
+     An Object Storage component that provides account services such
+     as list, create, modify, and audit. Do not confuse with OpenStack
+     Identity service, OpenLDAP, or similar user-account services.
+
+   accounting
+
+     The Compute service provides accounting information through the
+     event notification and system usage data facilities.
+
+   Active Directory
+
+     Authentication and identity service by Microsoft, based on LDAP.
+     Supported in OpenStack.
+
+   active/active configuration
+
+     In a high-availability setup with an active/active
+     configuration, several systems share the load and, if one
+     fails, the load is distributed to the remaining systems.
+
+   active/passive configuration
+
+     In a high-availability setup with an active/passive
+     configuration, systems are set up to bring additional resources online
+     to replace those that have failed.
+
+   address pool
+
+     A group of fixed and/or floating IP addresses that are assigned
+     to a project and can be used by or assigned to the VM instances in a
+     project.
+
+   Address Resolution Protocol (ARP)
+
+     The protocol by which layer-3 IP addresses are resolved into
+     layer-2 link local addresses.
+
+   admin API
+
+     A subset of API calls that are accessible to authorized
+     administrators and are generally not accessible to end users or the
+     public Internet. They can exist as a separate service (keystone) or
+     can be a subset of another API (nova).
+
+   admin server
+
+     In the context of the Identity service, the worker process that
+     provides access to the admin API.
+
+   administrator
+
+     The person responsible for installing, configuring,
+     and managing an OpenStack cloud.
+
+   Advanced Message Queuing Protocol (AMQP)
+
+     The open standard messaging protocol used by OpenStack
+     components for intra-service communications, provided by RabbitMQ,
+     Qpid, or ZeroMQ.
+
+   Advanced RISC Machine (ARM)
+
+     Lower power consumption CPU often found in mobile and embedded
+     devices. Supported by OpenStack.
+
+   alert
+
+     The Compute service can send alerts through its notification
+     system, which includes a facility to create custom notification
+     drivers. Alerts can be sent to and displayed on the dashboard.
+
+   allocate
+
+     The process of taking a floating IP address from the address
+     pool so it can be associated with a fixed IP on a guest VM
+     instance.
+
+   Amazon Kernel Image (AKI)
+
+     Both a VM container format and disk format. Supported by Image
+     service.
+
+   Amazon Machine Image (AMI)
+
+     Both a VM container format and disk format. Supported by Image
+     service.
+
+   Amazon Ramdisk Image (ARI)
+
+     Both a VM container format and disk format. Supported by Image
+     service.
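+
+     As an illustrative sketch only (the image name and file are
+     hypothetical, not part of the definition), such formats are
+     declared when uploading an image with the OpenStack client:
+
+     .. code-block:: console
+
+        $ openstack image create --container-format ari \
+          --disk-format ari --file initrd.img my-ramdisk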
+
+   Anvil
+
+     A project that ports the shell script-based project named
+     DevStack to Python.
+
+   aodh
+
+     Part of the OpenStack :term:`Telemetry service `; provides alarming functionality.
+
+   Apache
+
+     The Apache Software Foundation supports the Apache community of
+     open-source software projects. These projects provide software
+     products for the public good.
+
+   Apache License 2.0
+
+     All OpenStack core projects are provided under the terms of the
+     Apache License 2.0 license.
+
+   Apache Web Server
+
+     The most common web server software currently used on the
+     Internet.
+
+   API endpoint
+
+     The daemon, worker, or service that a client communicates with
+     to access an API. API endpoints can provide any number of services,
+     such as authentication, sales data, performance meters, Compute VM
+     commands, census data, and so on.
+
+   API extension
+
+     Custom modules that extend some OpenStack core APIs.
+
+   API extension plug-in
+
+     Alternative term for a Networking plug-in or Networking API
+     extension.
+
+   API key
+
+     Alternative term for an API token.
+
+   API server
+
+     Any node running a daemon or worker that provides an API
+     endpoint.
+
+   API token
+
+     Passed to API requests and used by OpenStack to verify that the
+     client is authorized to run the requested operation.
+
+   API version
+
+     In OpenStack, the API version for a project is part of the URL.
+     For example, ``example.com/nova/v1/foobar``.
+
+   applet
+
+     A Java program that can be embedded into a web page.
+
+   Application Catalog service (murano)
+
+     The project that provides an application catalog service so that users
+     can compose and deploy composite environments on an application
+     abstraction level while managing the application lifecycle.
+
+   Application Programming Interface (API)
+
+     A collection of specifications used to access a service,
+     application, or program. Includes service calls, required parameters
+     for each call, and the expected return values.
+
+   application server
+
+     A piece of software that makes available another piece of
+     software over a network.
+
+   Application Service Provider (ASP)
+
+     Companies that rent specialized applications that help
+     businesses and organizations provide additional services
+     at lower cost.
+
+   arptables
+
+     Tool used for maintaining Address Resolution Protocol packet
+     filter rules in the Linux kernel firewall modules. Used along with
+     iptables, ebtables, and ip6tables in Compute to provide firewall
+     services for VMs.
+
+   associate
+
+     The process of associating a Compute floating IP address with a
+     fixed IP address.
+
+   Asynchronous JavaScript and XML (AJAX)
+
+     A group of interrelated web development techniques used on the
+     client-side to create asynchronous web applications. Used extensively
+     in horizon.
+
+   ATA over Ethernet (AoE)
+
+     A disk storage protocol tunneled within Ethernet.
+
+   attach
+
+     The process of connecting a VIF or vNIC to an L2 network in
+     Networking. In the context of Compute, this process connects a storage
+     volume to an instance.
+
+   attachment (network)
+
+     Association of an interface ID to a logical port. Plugs an
+     interface into a port.
+
+   auditing
+
+     Provided in Compute through the system usage data
+     facility.
+
+   auditor
+
+     A worker process that verifies the integrity of Object Storage
+     objects, containers, and accounts. Auditors is the collective term for
+     the Object Storage account auditor, container auditor, and object
+     auditor.
+
+   Austin
+
+     The code name for the initial release of
+     OpenStack. The first design summit took place in
+     Austin, Texas, US.
+
+   auth node
+
+     Alternative term for an Object Storage authorization
+     node.
+
+   authentication
+
+     The process that confirms that the user, process, or client is
+     really who they say they are through private key, secret token,
+     password, fingerprint, or similar method.
+
+   authentication token
+
+     A string of text provided to the client after authentication.
+     Must be provided by the user or process in subsequent requests to the
+     API endpoint.
+
+   AuthN
+
+     The Identity service component that provides authentication
+     services.
+
+   authorization
+
+     The act of verifying that a user, process, or client is
+     authorized to perform an action.
+
+   authorization node
+
+     An Object Storage node that provides authorization
+     services.
+
+   AuthZ
+
+     The Identity component that provides high-level
+     authorization services.
+
+   Auto ACK
+
+     Configuration setting within RabbitMQ that enables or disables
+     message acknowledgment. Enabled by default.
+
+   auto declare
+
+     A Compute RabbitMQ setting that determines whether a message
+     exchange is automatically created when the program starts.
+
+   availability zone
+
+     An Amazon EC2 concept of an isolated area that is used for fault
+     tolerance. Do not confuse with an OpenStack Compute zone or
+     cell.
+
+   AWS CloudFormation template
+
+     AWS CloudFormation allows Amazon Web Services (AWS) users to create and manage a
+     collection of related resources. The Orchestration service
+     supports a CloudFormation-compatible format (CFN).
+
+B
+~
+
+.. glossary::
+
+   back end
+
+     Interactions and processes that are obfuscated from the user,
+     such as Compute volume mount, data transmission to an iSCSI target by
+     a daemon, or Object Storage object integrity checks.
+
+   back-end catalog
+
+     The storage method used by the Identity service catalog service
+     to store and retrieve information about API endpoints that are
+     available to the client. Examples include an SQL database, LDAP
+     database, or KVS back end.
+
+   back-end store
+
+     The persistent data store used to save and retrieve information
+     for a service, such as lists of Object Storage objects, current state
+     of guest VMs, lists of user names, and so on. Also, the method that the
+     Image service uses to get and store VM images. Options include Object
+     Storage, locally mounted file system, RADOS block devices, VMware
+     datastore, and HTTP.
+
+   Backup, Restore, and Disaster Recovery service (freezer)
+
+     The project that provides integrated tooling for backing up, restoring,
+     and recovering file systems, instances, or database backups.
+
+   bandwidth
+
+     The amount of available data used by communication resources,
+     such as the Internet. Represents the amount of data that is used to
+     download things or the amount of data available to download.
+
+   barbican
+
+     Code name of the :term:`Key Manager service
+     `.
+
+   bare
+
+     An Image service container format that indicates that no
+     container exists for the VM image.
+
+   Bare Metal service (ironic)
+
+     The OpenStack service that provides a service and associated libraries
+     capable of managing and provisioning physical machines in a
+     security-aware and fault-tolerant manner.
+
+   base image
+
+     An OpenStack-provided image.
+
+   Bell-LaPadula model
+
+     A security model that focuses on data confidentiality
+     and controlled access to classified information.
+     This model divides the entities into subjects and objects.
+     The clearance of a subject is compared to the classification of the
+     object to determine if the subject is authorized for the specific access mode.
+     The clearance or classification scheme is expressed in terms of a lattice.
+
+   Benchmark service (rally)
+
+     OpenStack project that provides a framework for
+     performance analysis and benchmarking of individual
+     OpenStack components as well as full production OpenStack
+     cloud deployments.
+
+   Bexar
+
+     A grouped release of projects related to
+     OpenStack that came out in February of 2011. It
+     included only Compute (nova) and Object Storage (swift).
+     Bexar is the code name for the second release of
+     OpenStack. The design summit took place in
+     San Antonio, Texas, US, which is the county seat for Bexar county.
+
+   binary
+
+     Information that consists solely of ones and zeroes, which is
+     the language of computers.
+
+   bit
+
+     A single-digit number in base 2 (either a zero or a
+     one). Bandwidth usage is measured in bits per second.
+
+   bits per second (BPS)
+
+     The universal measurement of how quickly data is transferred
+     from place to place.
+
+   block device
+
+     A device that moves data in the form of blocks. These device
+     nodes interface the devices, such as hard disks, CD-ROM drives, flash
+     drives, and other addressable regions of memory.
+
+   block migration
+
+     A method of VM live migration used by KVM to evacuate instances
+     from one host to another with very little downtime during a
+     user-initiated switchover. Does not require shared storage. Supported
+     by Compute.
+
+   Block Storage API
+
+     An API on a separate endpoint for attaching,
+     detaching, and creating block storage for compute
+     VMs.
+
+   Block Storage service (cinder)
+
+     The OpenStack service that implements services and libraries to provide
+     on-demand, self-service access to Block Storage resources via abstraction
+     and automation on top of other block storage devices.
+
+   BMC (Baseboard Management Controller)
+
+     The intelligence in the IPMI architecture, which is a specialized
+     micro-controller that is embedded on the motherboard of a computer
+     and acts as a server. Manages the interface between system management
+     software and platform hardware.
+
+   bootable disk image
+
+     A type of VM image that exists as a single, bootable
+     file.
+
+   Bootstrap Protocol (BOOTP)
+
+     A network protocol used by a network client to obtain an IP
+     address from a configuration server. Provided in Compute through the
+     dnsmasq daemon when using either the FlatDHCP manager or VLAN manager
+     network manager.
+
+   Border Gateway Protocol (BGP)
+
+     The Border Gateway Protocol is a dynamic routing protocol
+     that connects autonomous systems. Considered the
+     backbone of the Internet, this protocol connects disparate
+     networks to form a larger network.
+
+   browser
+
+     Any client software that enables a computer or device to access
+     the Internet.
+
+   builder file
+
+     Contains configuration information that Object Storage uses to
+     reconfigure a ring or to re-create it from scratch after a serious
+     failure.
+
+   bursting
+
+     The practice of utilizing a secondary environment to
+     elastically build instances on-demand when the primary
+     environment is resource constrained.
+
+   button class
+
+     A group of related button types within horizon. Buttons to
+     start, stop, and suspend VMs are in one class. Buttons to associate
+     and disassociate floating IP addresses are in another class, and so
+     on.
+
+   byte
+
+     Set of bits that make up a single character; there are usually 8
+     bits to a byte.
+
+C
+~
+
+.. glossary::
+
+   cache pruner
+
+     A program that keeps the Image service VM image cache at or
+     below its configured maximum size.
+
+   Cactus
+
+     An OpenStack grouped release of projects that came out in the
+     spring of 2011. It included Compute (nova), Object Storage (swift),
+     and the Image service (glance).
+     Cactus is a city in Texas, US and is the code name for
+     the third release of OpenStack. When OpenStack releases went
+     from three to six months long, the code name of the release
+     changed to match a geography nearest the previous
+     summit.
+
+   CALL
+
+     One of the RPC primitives used by the OpenStack message queue
+     software. Sends a message and waits for a response.
+
+   capability
+
+     Defines resources for a cell, including CPU, storage, and
+     networking. Can apply to the specific services within a cell or a
+     whole cell.
+
+   capacity cache
+
+     A Compute back-end database table that contains the current
+     workload, amount of free RAM, and number of VMs running on each host.
+     Used to determine on which host a VM starts.
+
+   capacity updater
+
+     A notification driver that monitors VM instances and updates the
+     capacity cache as needed.
+
+   CAST
+
+     One of the RPC primitives used by the OpenStack message queue
+     software. Sends a message and does not wait for a response.
+
+   catalog
+
+     A list of API endpoints that are available to a user after
+     authentication with the Identity service.
+
+   catalog service
+
+     An Identity service that lists API endpoints that are available
+     to a user after authentication with the Identity service.
+
+   ceilometer
+
+     Part of the OpenStack :term:`Telemetry service `; gathers and stores metrics from other
+     OpenStack services.
+
+   cell
+
+     Provides logical partitioning of Compute resources in a child
+     and parent relationship. Requests are passed from parent cells to
+     child cells if the parent cannot provide the requested
+     resource.
+
+   cell forwarding
+
+     A Compute option that enables parent cells to pass resource
+     requests to child cells if the parent cannot provide the requested
+     resource.
+
+   cell manager
+
+     The Compute component that contains a list of the current
+     capabilities of each host within the cell and routes requests as
+     appropriate.
+
+   CentOS
+
+     A Linux distribution that is compatible with OpenStack.
+
+   Ceph
+
+     Massively scalable distributed storage system that consists of
+     an object store, block store, and POSIX-compatible distributed file
+     system. Compatible with OpenStack.
+
+   CephFS
+
+     The POSIX-compliant file system provided by Ceph.
+
+   certificate authority (CA)
+
+     In cryptography, an entity that issues digital certificates. The digital
+     certificate certifies the ownership of a public key by the named
+     subject of the certificate. This enables others (relying parties) to
+     rely upon signatures or assertions made by the private key that
+     corresponds to the certified public key. In this model of trust
+     relationships, a CA is a trusted third party for both the subject
+     (owner) of the certificate and the party relying upon the certificate.
+     CAs are characteristic of many public key infrastructure (PKI)
+     schemes.
+     In OpenStack, a simple certificate authority is provided by Compute for
+     cloudpipe VPNs and VM image decryption.
+
+   Challenge-Handshake Authentication Protocol (CHAP)
+
+     An iSCSI authentication method supported by Compute.
+
+   chance scheduler
+
+     A scheduling method used by Compute that randomly chooses an
+     available host from the pool.
+
+   changes since
+
+     A Compute API parameter that downloads changes to the requested
+     item since your last request, instead of downloading a new, fresh set
+     of data and comparing it against the old data.
+
+   Chef
+
+     An operating system configuration management tool supporting
+     OpenStack deployments.
+
+   child cell
+
+     If a requested resource such as CPU time, disk storage, or
+     memory is not available in the parent cell, the request is forwarded
+     to its associated child cells. If the child cell can fulfill the
+     request, it does. Otherwise, it attempts to pass the request to any of
+     its children.
+
+   cinder
+
+     Codename for :term:`Block Storage service
+     `.
+
+   CirrOS
+
+     A minimal Linux distribution designed for use as a test
+     image on clouds such as OpenStack.
+
+   Cisco neutron plug-in
+
+     A Networking plug-in for Cisco devices and technologies,
+     including UCS and Nexus.
+
+   cloud architect
+
+     A person who plans, designs, and oversees the creation of
+     clouds.
+
+   Cloud Auditing Data Federation (CADF)
+
+     Cloud Auditing Data Federation (CADF) is a
+     specification for audit event data. CADF is
+     supported by OpenStack Identity.
+
+   cloud computing
+
+     A model that enables access to a shared pool of configurable
+     computing resources, such as networks, servers, storage, applications,
+     and services, that can be rapidly provisioned and released with
+     minimal management effort or service provider interaction.
+
+   cloud controller
+
+     Collection of Compute components that represent the global state
+     of the cloud; talks to services, such as Identity authentication,
+     Object Storage, and node/storage workers through a
+     queue.
+
+   cloud controller node
+
+     A node that runs network, volume, API, scheduler, and image
+     services. Each service may be broken out into separate nodes for
+     scalability or availability.
+
+   Cloud Data Management Interface (CDMI)
+
+     SNIA standard that defines a RESTful API for managing objects in
+     the cloud, currently unsupported in OpenStack.
+
+   Cloud Infrastructure Management Interface (CIMI)
+
+     An in-progress specification for cloud management. Currently
+     unsupported in OpenStack.
+
+   cloud-init
+
+     A package commonly installed in VM images that performs
+     initialization of an instance after boot using information that it
+     retrieves from the metadata service, such as the SSH public key and
+     user data.
+
+   cloudadmin
+
+     One of the default roles in the Compute RBAC system. Grants
+     complete system access.
+
+   Cloudbase-Init
+
+     A Windows project providing guest initialization features,
+     similar to cloud-init.
+
+   cloudpipe
+
+     A compute service that creates VPNs on a per-project
+     basis.
+
+   cloudpipe image
+
+     A pre-made VM image that serves as a cloudpipe server.
+     Essentially, OpenVPN running on Linux.
+
+   Clustering service (senlin)
+
+     The project that implements clustering services and libraries
+     for the management of groups of homogeneous objects exposed
+     by other OpenStack services.
+
+   command filter
+
+     Lists allowed commands within the Compute rootwrap
+     facility.
+
+   Common Internet File System (CIFS)
+
+     A file sharing protocol. It is a public or open variation of the
+     original Server Message Block (SMB) protocol developed and used by
+     Microsoft. Like the SMB protocol, CIFS runs at a higher level and uses
+     the TCP/IP protocol.
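+
+     For illustration only (the server, share, mount point, and user
+     name are hypothetical), a CIFS share can be mounted on a Linux
+     host like this:
+
+     .. code-block:: console
+
+        # mount -t cifs //fileserver/share /mnt/share -o username=alice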
+
+   Common Libraries (oslo)
+
+     The project that produces a set of python libraries containing code
+     shared by OpenStack projects. The APIs provided by these libraries
+     should be high quality, stable, consistent, documented, and generally
+     applicable.
+
+   community project
+
+     A project that is not officially endorsed by the OpenStack
+     Foundation. If the project is successful enough, it might be elevated
+     to an incubated project and then to a core project, or it might be
+     merged with the main code trunk.
+
+   compression
+
+     The process of reducing the size of a file by special encoding;
+     the file can be decompressed again to its original content. OpenStack
+     supports compression at the Linux file system level but does not
+     support compression for things such as Object Storage objects or
+     Image service VM images.
+
+   Compute API (Nova API)
+
+     The nova-api daemon provides access to nova services. Can communicate with
+     other APIs, such as the Amazon EC2 API.
+
+   compute controller
+
+     The Compute component that chooses suitable hosts on which to
+     start VM instances.
+
+   compute host
+
+     Physical host dedicated to running compute nodes.
+
+   compute node
+
+     A node that runs the nova-compute daemon and manages VM
+     instances, which provide a wide
+     range of services, such as web applications and analytics.
+
+   Compute service (nova)
+
+     The OpenStack core project that implements services and associated
+     libraries to provide massively-scalable, on-demand, self-service
+     access to compute resources, including bare metal, virtual machines,
+     and containers.
+
+   compute worker
+
+     The Compute component that runs on each compute node and manages
+     the VM instance lifecycle, including run, reboot, terminate,
+     attach/detach volumes, and so on. Provided by the nova-compute daemon.
+
+   concatenated object
+
+     A set of segment objects that Object Storage combines and sends
+     to the client.
+
+   conductor
+
+     In Compute, conductor is the process that proxies database
+     requests from the compute process. Using conductor improves security
+     because compute nodes do not need direct access to the
+     database.
+
+   congress
+
+     Code name for the :term:`Governance service
+     `.
+
+   consistency window
+
+     The amount of time it takes for a new Object Storage object to
+     become accessible to all clients.
+
+   console log
+
+     Contains the output from a Linux VM console in Compute.
+
+   container
+
+     Organizes and stores objects in Object Storage. Similar to the
+     concept of a Linux directory but cannot be nested. Alternative term
+     for an Image service container format.
+
+   container auditor
+
+     Checks for missing replicas or incorrect objects in specified
+     Object Storage containers through queries to the SQLite back-end
+     database.
+
+   container database
+
+     A SQLite database that stores Object Storage containers and
+     container metadata. The container server accesses this
+     database.
+
+   container format
+
+     A wrapper used by the Image service that contains a VM image and
+     its associated metadata, such as machine state, OS disk size, and so
+     on.
+
+   Container Infrastructure Management service (magnum)
+
+     The project that provides a set of services for provisioning, scaling,
+     and managing container orchestration engines.
+
+   container server
+
+     An Object Storage server that manages containers.
+
+   container service
+
+     The Object Storage component that provides container services,
+     such as create, delete, list, and so on.
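+
+     As a brief sketch (the container name is hypothetical), these
+     operations are exposed through the OpenStack client:
+
+     .. code-block:: console
+
+        $ openstack container create web-assets
+        $ openstack container list
+        $ openstack container delete web-assets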
+
+   content delivery network (CDN)
+
+     A content delivery network is a specialized network that is
+     used to distribute content to clients, typically located
+     close to the client for increased performance.
+
+   controller node
+
+     Alternative term for a cloud controller node.
+
+   core API
+
+     Depending on context, the core API is either the OpenStack API
+     or the main API of a specific core project, such as Compute,
+     Networking, Image service, and so on.
+
+   core service
+
+     An official OpenStack service defined as core by the
+     DefCore Committee. Currently, it consists of the
+     Block Storage service (cinder), Compute service (nova),
+     Identity service (keystone), Image service (glance),
+     Networking service (neutron), and Object Storage service (swift).
+
+   cost
+
+     Under the Compute distributed scheduler, this is calculated by
+     looking at the capabilities of each host relative to the flavor of the
+     VM instance being requested.
+
+   credentials
+
+     Data that is only known to or accessible by a user and
+     used to verify that the user is who they say they are.
+     Credentials are presented to the server during
+     authentication. Examples include a password, secret key,
+     digital certificate, and fingerprint.
+
+   CRL
+
+     A Certificate Revocation List (CRL) in a PKI model is a list of
+     certificates that have been revoked. End entities presenting
+     these certificates should not be trusted.
+
+   Cross-Origin Resource Sharing (CORS)
+
+     A mechanism that allows many resources (for example,
+     fonts, JavaScript) on a web page to be requested from
+     another domain outside the domain from which the resource
+     originated. In particular, JavaScript's AJAX calls can use
+     the XMLHttpRequest mechanism.
+
+   Crowbar
+
+     An open source community project by SUSE that aims to provide
+     all necessary services to quickly deploy and manage clouds.
+
+   current workload
+
+     An element of the Compute capacity cache that is calculated
+     based on the number of build, snapshot, migrate, and resize operations
+     currently in progress on a given host.
+
+   customer
+
+     Alternative term for project.
+
+   customization module
+
+     A user-created Python module that is loaded by horizon to change
+     the look and feel of the dashboard.
+
+D
+~
+
+.. glossary::
+
+   daemon
+
+     A process that runs in the background and waits for requests.
+     May or may not listen on a TCP or UDP port. Do not confuse with a
+     worker.
+
+   Dashboard (horizon)
+
+     OpenStack project that provides an extensible, unified, web-based
+     user interface for all OpenStack services.
+
+   data encryption
+
+     Both Image service and Compute support encrypted virtual machine
+     (VM) images (but not instances). In-transit data encryption is
+     supported in OpenStack using technologies such as HTTPS, SSL, TLS, and
+     SSH. Object Storage does not support object encryption at the
+     application level but may support storage that uses disk encryption.
+
+   Data loss prevention (DLP) software
+
+     Software programs used to protect sensitive information
+     and prevent it from leaking outside a network boundary
+     by detecting and denying unauthorized data transfers.
+
+   Data Processing service (sahara)
+
+     OpenStack project that provides a scalable
+     data-processing stack and associated management
+     interfaces.
+
+   data store
+
+     A database engine supported by the Database service.
+
+   database ID
+
+     A unique ID given to each replica of an Object Storage
+     database.
+
+   database replicator
+
+     An Object Storage component that copies changes in the account,
+     container, and object databases to other nodes.
+
+   Database service (trove)
+
+     An integrated project that provides scalable and reliable
+     Cloud Database-as-a-Service functionality for both
+     relational and non-relational database engines.
+
+   deallocate
+
+     The process of removing the association between a floating IP
+     address and a fixed IP address. Once this association is removed, the
+     floating IP returns to the address pool.
+
+   Debian
+
+     A Linux distribution that is compatible with OpenStack.
+
+   deduplication
+
+     The process of finding duplicate data at the disk block, file,
+     and/or object level to minimize storage use; currently unsupported
+     within OpenStack.
+
+   default panel
+
+     The default panel that is displayed when a user accesses the
+     dashboard.
+
+   default project
+
+     New users are assigned to this project if no project is specified
+     when a user is created.
+
+   default token
+
+     An Identity service token that is not associated with a specific
+     project and is exchanged for a scoped token.
+
+   delayed delete
+
+     An option within Image service so that an image is deleted after
+     a predefined number of seconds instead of immediately.
+
+   delivery mode
+
+     Setting for the Compute RabbitMQ message delivery mode; can be
+     set to either transient or persistent.
+
+   denial of service (DoS)
+
+     Denial of service (DoS) is a short form for
+     denial-of-service attack. This is a malicious attempt to
+     prevent legitimate users from using a service.
+
+   deprecated auth
+
+     An option within Compute that enables administrators to create
+     and manage users through the ``nova-manage`` command as
+     opposed to using the Identity service.
+
+   designate
+
+     Code name for the :term:`DNS service `.
+
+   Desktop-as-a-Service
+
+     A platform that provides a suite of desktop environments
+     that users access to receive a desktop experience from
+     any location. This may provide general use, development, or
+     even homogeneous testing environments.
+
+   developer
+
+     One of the default roles in the Compute RBAC system and the
+     default role assigned to a new user.
+
+   device ID
+
+     Maps Object Storage partitions to physical storage
+     devices.
+
+   device weight
+
+     Distributes partitions proportionately across Object Storage
+     devices based on the storage capacity of each device.
+
+   DevStack
+
+     Community project that uses shell scripts to quickly build
+     complete OpenStack development environments.
+
+   DHCP agent
+
+     OpenStack Networking agent that provides DHCP services
+     for virtual networks.
+
+   Diablo
+
+     A grouped release of projects related to OpenStack that came out
+     in the fall of 2011, the fourth release of OpenStack. It included
+     Compute (nova 2011.3), Object Storage (swift 1.4.3), and the Image
+     service (glance).
+     Diablo is the code name for the fourth release of
+     OpenStack. The design summit took place in
+     the Bay Area near Santa Clara,
+     California, US and Diablo is a nearby city.
+
+   direct consumer
+
+     An element of the Compute RabbitMQ that comes to life when a RPC
+     call is executed. It connects to a direct exchange through a unique
+     exclusive queue, sends the message, and terminates.
+
+   direct exchange
+
+     A routing table that is created within the Compute RabbitMQ
+     during RPC calls; one is created for each RPC call that is
+     invoked.
+
+   direct publisher
+
+     Element of RabbitMQ that provides a response to an incoming MQ
+     message.
+
+   disassociate
+
+     The process of removing the association between a floating IP
+     address and fixed IP and thus returning the floating IP address to the
+     address pool.
+
+   Discretionary Access Control (DAC)
+
+     Governs the ability of subjects to access objects, while enabling
+     users to make policy decisions and assign security attributes.
+     The traditional UNIX system of users, groups, and read-write-execute
+     permissions is an example of DAC.
+
+   disk encryption
+
+     The ability to encrypt data at the file system, disk partition,
+     or whole-disk level. Supported within Compute VMs.
+
+   disk format
+
+     The underlying format that a disk image for a VM is stored as
+     within the Image service back-end store. For example, AMI, ISO, QCOW2,
+     VMDK, and so on.
+
+   dispersion
+
+     In Object Storage, tools to test and ensure dispersion of
+     objects and containers to ensure fault tolerance.
+
+   distributed virtual router (DVR)
+
+     Mechanism for highly available multi-host routing when using
+     OpenStack Networking (neutron).
+
+   Django
+
+     A web framework used extensively in horizon.
+
+   DNS record
+
+     A record that specifies information about a particular domain
+     and belongs to the domain.
+
+   DNS service (designate)
+
+     OpenStack project that provides scalable, on demand, self
+     service access to authoritative DNS services, in a
+     technology-agnostic manner.
+
+   dnsmasq
+
+     Daemon that provides DNS, DHCP, BOOTP, and TFTP services for
+     virtual networks.
+
+   domain
+
+     An Identity API v3 entity. Represents a collection of
+     projects, groups and users that defines administrative boundaries for
+     managing OpenStack Identity entities.
+     On the Internet, separates a website from other sites. Often,
+     the domain name has two or more parts that are separated by dots.
+     For example, yahoo.com, usa.gov, harvard.edu, or
+     mail.yahoo.com.
+     Also, a domain is an entity or container of all DNS-related
+     information containing one or more records.
+
+   Domain Name System (DNS)
+
+     A system by which Internet domain name-to-address and
+     address-to-name resolutions are determined.
+     DNS helps navigate the Internet by translating the IP address
+     into an address that is easier to remember. For example, translating
+     111.111.111.1 into www.yahoo.com.
+     All domains and their components, such as mail servers, utilize
+     DNS to resolve to the appropriate locations. DNS servers are usually
+     set up in a master-slave relationship such that failure of the master
+     invokes the slave. DNS servers might also be clustered or replicated
+     such that changes made to one DNS server are automatically propagated
+     to other active servers.
+     In Compute, the support that enables associating DNS entries
+     with floating IP addresses, nodes, or cells so that hostnames are
+     consistent across reboots.
+
+   download
+
+     The transfer of data, usually in the form of files, from one
+     computer to another.
+
+   durable exchange
+
+     The Compute RabbitMQ message exchange that remains active when
+     the server restarts.
+
+   durable queue
+
+     A Compute RabbitMQ message queue that remains active when the
+     server restarts.
+
+   Dynamic Host Configuration Protocol (DHCP)
+
+     A network protocol that configures devices that are connected to a
+     network so that they can communicate on that network by using the
+     Internet Protocol (IP). The protocol is implemented in a client-server
+     model where DHCP clients request configuration data, such as an IP
+     address, a default route, and one or more DNS server addresses from a
+     DHCP server.
+     A method to automatically configure networking for a host at
+     boot time. Provided by both Networking and Compute.
+
+   Dynamic HyperText Markup Language (DHTML)
+
+     Pages that use HTML, JavaScript, and Cascading Style Sheets to
+     enable users to interact with a web page or show simple
+     animation.
+
+E
+~
+
+.. glossary::
+
+   east-west traffic
+
+     Network traffic between servers in the same cloud or data center.
+     See also north-south traffic.
+
+   EBS boot volume
+
+     An Amazon EBS storage volume that contains a bootable VM image,
+     currently unsupported in OpenStack.
+
+   ebtables
+
+     Filtering tool for a Linux bridging firewall, enabling
+     filtering of network traffic passing through a Linux bridge.
+     Used in Compute along with arptables, iptables, and ip6tables
+     to ensure isolation of network communications.
+
+   EC2
+
+     The Amazon commercial compute product, similar to
+     Compute.
+
+   EC2 access key
+
+     Used along with an EC2 secret key to access the Compute EC2
+     API.
+
+   EC2 API
+
+     OpenStack supports accessing the Amazon EC2 API through
+     Compute.
+
+   EC2 Compatibility API
+
+     A Compute component that enables OpenStack to communicate with
+     Amazon EC2.
+
+   EC2 secret key
+
+     Used along with an EC2 access key when communicating with the
+     Compute EC2 API; used to digitally sign each request.
+
+   Elastic Block Storage (EBS)
+
+     The Amazon commercial block storage product.
+
+   encapsulation
+
+     The practice of placing one packet type within another for
+     the purposes of abstracting or securing data. Examples
+     include GRE, MPLS, or IPsec.
+
+   encryption
+
+     OpenStack supports encryption technologies such as HTTPS, SSH,
+     SSL, TLS, digital certificates, and data encryption.
+
+   endpoint
+
+     See API endpoint.
+
+   endpoint registry
+
+     Alternative term for an Identity service catalog.
+
+   endpoint template
+
+     A list of URL and port number endpoints that indicate where a
+     service, such as Object Storage, Compute, Identity, and so on, can be
+     accessed.
+
+   entity
+
+     Any piece of hardware or software that wants to connect to the
+     network services provided by Networking, the network connectivity
+     service. An entity can make use of Networking by implementing a
+     VIF.
+
+   ephemeral image
+
+     A VM image that does not save changes made to its volumes and
+     reverts them to their original state after the instance is
+     terminated.
+
+   ephemeral volume
+
+     Volume that does not save the changes made to it and reverts to
+     its original state when the current user relinquishes control.
+
+   Essex
+
+     A grouped release of projects related to OpenStack that came out
+     in April 2012, the fifth release of OpenStack. It included Compute
+     (nova 2012.1), Object Storage (swift 1.4.8), Image (glance), Identity
+     (keystone), and Dashboard (horizon).
+     Essex is the code name for the fifth release of
+     OpenStack. The design summit took place in
+     Boston, Massachusetts, US and Essex is a nearby city.
+
+   ESXi
+
+     An OpenStack-supported hypervisor.
+
+   ETag
+
+     MD5 hash of an object within Object Storage, used to ensure data
+     integrity.
+
+   euca2ools
+
+     A collection of command-line tools for administering VMs; most
+     are compatible with OpenStack.
+
+   Eucalyptus Kernel Image (EKI)
+
+     Used along with an ERI to create an EMI.
+
+   Eucalyptus Machine Image (EMI)
+
+     VM image container format supported by Image service.
+
+   Eucalyptus Ramdisk Image (ERI)
+
+     Used along with an EKI to create an EMI.
+
+   evacuate
+
+     The process of migrating one or all virtual machine (VM)
+     instances from one host to another, compatible with both shared
+     storage live migration and block migration.
+
+   exchange
+
+     Alternative term for a RabbitMQ message exchange.
+
+   exchange type
+
+     A routing algorithm in the Compute RabbitMQ.
+
+   exclusive queue
+
+     Connected to by a direct consumer in RabbitMQ (Compute); the
+     message can be consumed only by the current connection.
+
+   extended attributes (xattr)
+
+     File system option that enables storage of additional
+     information beyond owner, group, permissions, modification time, and
+     so on. The underlying Object Storage file system must support extended
+     attributes.
+
+   extension
+
+     Alternative term for an API extension or plug-in. In the context
+     of Identity service, this is a call that is specific to the
+     implementation, such as adding support for OpenID.
+
+   external network
+
+     A network segment typically used for instance Internet
+     access.
+
+   extra specs
+
+     Specifies additional requirements when Compute determines where
+     to start a new instance. Examples include a minimum amount of network
+     bandwidth or a GPU.
+
+F
+~
+
+.. glossary::
+
+   FakeLDAP
+
+     An easy method to create a local LDAP directory for testing
+     Identity and Compute. Requires Redis.
+
+   fan-out exchange
+
+     Within RabbitMQ and Compute, it is the messaging interface that
+     is used by the scheduler service to receive capability messages from
+     the compute, volume, and network nodes.
+
+   federated identity
+
+     A method to establish trusts between identity providers and the
+     OpenStack cloud.
+
+   Fedora
+
+     A Linux distribution compatible with OpenStack.
+
+   Fibre Channel
+
+     Storage protocol similar in concept to TCP/IP; encapsulates SCSI
+     commands and data.
+
+   Fibre Channel over Ethernet (FCoE)
+
+     The fibre channel protocol tunneled within Ethernet.
+
+   fill-first scheduler
+
+     The Compute scheduling method that attempts to fill a host with
+     VMs rather than starting new VMs on a variety of hosts.
+
+   filter
+
+     The step in the Compute scheduling process when hosts that
+     cannot run VMs are eliminated and not chosen.
+
+   firewall
+
+     Used to restrict communications between hosts and/or nodes,
+     implemented in Compute using iptables, arptables, ip6tables, and
+     ebtables.
+
+   FireWall-as-a-Service (FWaaS)
+
+     A Networking extension that provides perimeter firewall
+     functionality.
+
+   fixed IP address
+
+     An IP address that is associated with the same instance each
+     time that instance boots, is generally not accessible to end users or
+     the public Internet, and is used for management of the
+     instance.
+
+   Flat Manager
+
+     The Compute component that gives IP addresses to authorized
+     nodes and assumes DHCP, DNS, and routing configuration and services
+     are provided by something else.
+
+   flat mode injection
+
+     A Compute networking method where the OS network configuration
+     information is injected into the VM image before the instance
+     starts.
+
+   flat network
+
+     Virtual network type that uses neither VLANs nor tunnels to
+     segregate project traffic. Each flat network typically requires
+     a separate underlying physical interface defined by bridge
+     mappings. However, a flat network can contain multiple
+     subnets.
+
+   FlatDHCP Manager
+
+     The Compute component that provides dnsmasq (DHCP, DNS, BOOTP,
+     TFTP) and radvd (routing) services.
+
+   flavor
+
+     Alternative term for a VM instance type.
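+
+     As a hedged sketch (the flavor name and sizes are illustrative,
+     not defaults), an administrator might define a flavor with the
+     OpenStack client:
+
+     .. code-block:: console
+
+        $ openstack flavor create --ram 2048 --vcpus 2 --disk 20 m1.demo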
+
+   flavor ID
+
+     UUID for each Compute or Image service VM flavor or instance
+     type.
+
+   floating IP address
+
+     An IP address that a project can associate with a VM so that the
+     instance has the same public IP address each time that it boots. You
+     create a pool of floating IP addresses and assign them to instances as
+     they are launched to maintain a consistent IP address for maintaining
+     DNS assignment.
+
+   Folsom
+
+     A grouped release of projects related to OpenStack that came out
+     in the fall of 2012, the sixth release of OpenStack. It included
+     Compute (nova), Object Storage (swift), Identity (keystone),
+     Networking (neutron), Image service (glance), and Volumes or Block
+     Storage (cinder).
+     Folsom is the code name for the sixth release of
+     OpenStack. The design summit took place in
+     San Francisco, California, US and Folsom is a nearby city.
+
+   FormPost
+
+     Object Storage middleware that uploads (posts) an image through
+     a form on a web page.
+
+   freezer
+
+     Code name for the :term:`Backup, Restore, and Disaster Recovery service
+     `.
+
+   front end
+
+     The point where a user interacts with a service; can be an API
+     endpoint, the dashboard, or a command-line tool.
+
+G
+~
+
+.. glossary::
+
+   gateway
+
+     An IP address, typically assigned to a router, that
+     passes network traffic between different networks.
+
+   generic receive offload (GRO)
+
+     Feature of certain network interface drivers that
+     combines many smaller received packets into a large packet
+     before delivery to the kernel IP stack.
+
+   generic routing encapsulation (GRE)
+
+     Protocol that encapsulates a wide variety of network
+     layer protocols inside virtual point-to-point links.
+
+   glance
+
+     Codename for the :term:`Image service`.
+
+   glance API server
+
+     Alternative name for the :term:`Image API`.
+
+   glance registry
+
+     Alternative term for the Image service :term:`image registry`.
+
+   global endpoint template
+
+     The Identity service endpoint template that contains services
+     available to all projects.
+
+   GlusterFS
+
+     A file system designed to aggregate NAS hosts, compatible with
+     OpenStack.
+
+   gnocchi
+
+     Part of the OpenStack :term:`Telemetry service `; provides an indexer and time-series
+     database.
+
+   golden image
+
+     A method of operating system installation where a finalized disk
+     image is created and then used by all nodes without
+     modification.
+
+   Governance service (congress)
+
+     The project that provides Governance-as-a-Service across
+     any collection of cloud services in order to monitor,
+     enforce, and audit policy over dynamic infrastructure.
+
+   Graphic Interchange Format (GIF)
+
+     A type of image file that is commonly used for animated images
+     on web pages.
+
+   Graphics Processing Unit (GPU)
+
+     Choosing a host based on the existence of a GPU is currently
+     unsupported in OpenStack.
+
+   Green Threads
+
+     The cooperative threading model used by Python; reduces race
+     conditions and only context switches when specific library calls are
+     made. Each OpenStack service is its own thread.
+
+   Grizzly
+
+     The code name for the seventh release of
+     OpenStack. The design summit took place in
+     San Diego, California, US and Grizzly is an element of the state flag of
+     California.
+
+   Group
+
+     An Identity v3 API entity. Represents a collection of users that is
+     owned by a specific domain.
+
+   guest OS
+
+     An operating system instance running under the control of a
+     hypervisor.
+
+H
+~
+
+.. glossary::
+
+   Hadoop
+
+     Apache Hadoop is an open source software framework that supports
+     data-intensive distributed applications.
+
+   Hadoop Distributed File System (HDFS)
+
+     A distributed, highly fault-tolerant file system designed to run
+     on low-cost commodity hardware.
+
+   handover
+
+     An object state in Object Storage where a new replica of the
+     object is automatically created due to a drive failure.
+
+   HAProxy
+
+     Provides a load balancer for TCP and HTTP-based applications that
+     spreads requests across multiple servers.
+
+   hard reboot
+
+     A type of reboot where a physical or virtual power button is
+     pressed as opposed to a graceful, proper shutdown of the operating
+     system.
+
+   Havana
+
+     The code name for the eighth release of OpenStack. The
+     design summit took place in Portland, Oregon, US and Havana is
+     an unincorporated community in Oregon.
+
+   health monitor
+
+     Determines whether back-end members of a VIP pool can
+     process a request. A pool can have several health monitors
+     associated with it. When a pool has several monitors
+     associated with it, all monitors check each member of the
+     pool. All monitors must declare a member to be healthy for
+     it to stay active.
+
+   heat
+
+     Codename for the :term:`Orchestration service
+     `.
+
+   Heat Orchestration Template (HOT)
+
+     Heat input in the format native to OpenStack.
+
+   high availability (HA)
+
+     A high availability system design approach and associated
+     service implementation ensures that a prearranged level of
+     operational performance will be met during a contractual
+     measurement period. High availability systems seek to
+     minimize system downtime and data loss.
+
+   horizon
+
+     Codename for the :term:`Dashboard `.
+
+   horizon plug-in
+
+     A plug-in for the OpenStack Dashboard (horizon).
+
+   host
+
+     A physical computer, not a VM instance (node).
+
+   host aggregate
+
+     A method to further subdivide availability zones into hypervisor
+     pools, a collection of common hosts.
+
+   Host Bus Adapter (HBA)
+
+     Device plugged into a PCI slot, such as a fibre channel or
+     network card.
+
+   hybrid cloud
+
+     A hybrid cloud is a composition of two or more clouds
+     (private, community or public) that remain distinct entities
+     but are bound together, offering the benefits of multiple
+     deployment models. Hybrid cloud can also mean the ability
+     to connect colocation, managed and/or dedicated services
+     with cloud resources.
+
+   Hyper-V
+
+     One of the hypervisors supported by OpenStack.
+
+   hyperlink
+
+     Any kind of text that contains a link to some other site,
+     commonly found in documents where clicking on a word or words opens up
+     a different website.
+
+   Hypertext Transfer Protocol (HTTP)
+
+     An application protocol for distributed, collaborative,
+     hypermedia information systems. It is the foundation of data
+     communication for the World Wide Web. Hypertext is structured
+     text that uses logical links (hyperlinks) between nodes containing
+     text. HTTP is the protocol to exchange or transfer hypertext.
+
+   Hypertext Transfer Protocol Secure (HTTPS)
+
+     An encrypted communications protocol for secure communication
+     over a computer network, with especially wide deployment on the
+     Internet. Technically, it is not a protocol in and of itself;
+     rather, it is the result of simply layering the Hypertext Transfer
+     Protocol (HTTP) on top of the TLS or SSL protocol, thus adding the
+     security capabilities of TLS or SSL to standard HTTP communications.
+ Most OpenStack API endpoints and many inter-component communications
+ support HTTPS communication.
+
+ hypervisor
+
+ Software that arbitrates and controls VM access to the actual
+ underlying hardware.
+
+ hypervisor pool
+
+ A collection of hypervisors grouped together through host
+ aggregates.
+
+I
+~
+
+.. glossary::
+
+ Icehouse
+
+ The code name for the ninth release of OpenStack. The
+ design summit took place in Hong Kong and Ice House is a
+ street in that city.
+
+ ID number
+
+ Unique numeric ID associated with each user in Identity,
+ conceptually similar to a Linux or LDAP UID.
+
+ Identity API
+
+ Alternative term for the Identity service API.
+
+ Identity back end
+
+ The source used by Identity service to retrieve user
+ information; an OpenLDAP server, for example.
+
+ identity provider
+
+ A directory service, which allows users to log in with a user
+ name and password. It is a typical source of authentication
+ tokens.
+
+ Identity service (keystone)
+
+ The project that facilitates API client authentication, service
+ discovery, distributed multi-project authorization, and auditing.
+ It provides a central directory of users mapped to the OpenStack
+ services they can access. It also registers endpoints for OpenStack
+ services and acts as a common authentication system.
+
+ Identity service API
+
+ The API used to access the OpenStack Identity service provided
+ through keystone.
+
+ IETF
+
+ Internet Engineering Task Force (IETF) is an open standards
+ organization that develops Internet standards, particularly the
+ standards pertaining to TCP/IP.
+
+ image
+
+ A collection of files for a specific operating system (OS) that
+ you use to create or rebuild a server. OpenStack provides pre-built
+ images. You can also create custom images, or snapshots, from servers
+ that you have launched. Custom images can be used for data backups or
+ as "gold" images for additional servers.
+
+ Image API
+
+ The Image service API endpoint for management of VM
+ images.
+ Processes client requests for VMs, updates Image service
+ metadata on the registry server, and communicates with the store
+ adapter to upload VM images from the back-end store.
+
+ image cache
+
+ Used by Image service to obtain images on the local host rather
+ than re-downloading them from the image server each time one is
+ requested.
+
+ image ID
+
+ Combination of a URI and UUID used to access Image service VM
+ images through the image API.
+
+ image membership
+
+ A list of projects that can access a given VM image within Image
+ service.
+
+ image owner
+
+ The project that owns an Image service virtual machine
+ image.
+
+ image registry
+
+ A list of VM images that are available through Image
+ service.
+
+ Image service (glance)
+
+ The OpenStack service that provides services and associated libraries
+ to store, browse, share, distribute and manage bootable disk images,
+ other data closely associated with initializing compute resources,
+ and metadata definitions.
+
+ image status
+
+ The current status of a VM image in Image service, not to be
+ confused with the status of a running instance.
+
+ image store
+
+ The back-end store used by Image service to store VM images;
+ options include Object Storage, locally mounted file system,
+ RADOS block devices, VMware datastore, or HTTP.
+
+ image UUID
+
+ UUID used by Image service to uniquely identify each VM
+ image.
+
+ incubated project
+
+ A community project may be elevated to this status and is then
+ promoted to a core project.
+ + Infrastructure Optimization service (watcher) + + OpenStack project that aims to provide a flexible and scalable resource + optimization service for multi-project OpenStack-based clouds. + + Infrastructure-as-a-Service (IaaS) + + IaaS is a provisioning model in which an organization outsources + physical components of a data center, such as storage, hardware, + servers, and networking components. A service provider owns the + equipment and is responsible for housing, operating and maintaining + it. The client typically pays on a per-use basis. + IaaS is a model for providing cloud services. + + ingress filtering + + The process of filtering incoming network traffic. Supported by + Compute. + + INI format + + The OpenStack configuration files use an INI format to + describe options and their values. It consists of sections + and key value pairs. + + injection + + The process of putting a file into a virtual machine image + before the instance is started. + + Input/Output Operations Per Second (IOPS) + + IOPS are a common performance measurement used to benchmark computer + storage devices like hard disk drives, solid state drives, and + storage area networks. + + instance + + A running VM, or a VM in a known state such as suspended, that + can be used like a hardware server. + + instance ID + + Alternative term for instance UUID. + + instance state + + The current state of a guest VM image. + + instance tunnels network + + A network segment used for instance traffic tunnels + between compute nodes and the network node. + + instance type + + Describes the parameters of the various virtual machine images + that are available to users; includes parameters such as CPU, storage, + and memory. Alternative term for flavor. + + instance type ID + + Alternative term for a flavor ID. + + instance UUID + + Unique ID assigned to each guest VM instance. + + Intelligent Platform Management Interface (IPMI) + + IPMI is a standardized computer system interface used by system + administrators for out-of-band management of computer systems and + monitoring of their operation. In layman's terms, it is a way to + manage a computer using a direct network connection, whether it is + turned on or not; connecting to the hardware rather than an operating + system or login shell. + + interface + + A physical or virtual device that provides connectivity + to another device or medium. + + interface ID + + Unique ID for a Networking VIF or vNIC in the form of a + UUID. + + Internet Control Message Protocol (ICMP) + + A network protocol used by network devices for control messages. + For example, :command:`ping` uses ICMP to test + connectivity. + + Internet protocol (IP) + + Principal communications protocol in the internet protocol + suite for relaying datagrams across network boundaries. + + Internet Service Provider (ISP) + + Any business that provides Internet access to individuals or + businesses. + + Internet Small Computer System Interface (iSCSI) + + Storage protocol that encapsulates SCSI frames for transport + over IP networks. + Supported by Compute, Object Storage, and Image service. + + IP address + + Number that is unique to every computer system on the Internet. + Two versions of the Internet Protocol (IP) are in use for addresses: + IPv4 and IPv6. + + IP Address Management (IPAM) + + The process of automating IP address allocation, deallocation, + and management. Currently provided by Compute, melange, and + Networking. 
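+
+ For example, IPAM in Networking can be seen in a subnet that carries an
+ explicit allocation pool from which ports are assigned addresses; the
+ network name, subnet name, and address range below are illustrative
+ only:
+
+ .. code-block:: console
+
+    $ openstack subnet create --network demo-net \
+      --subnet-range 192.0.2.0/24 \
+      --allocation-pool start=192.0.2.10,end=192.0.2.200 demo-subnet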
+
+ ip6tables
+
+ Tool used to set up, maintain, and inspect the tables of IPv6
+ packet filter rules in the Linux kernel. In OpenStack Compute,
+ ip6tables is used along with arptables, ebtables, and iptables to
+ create firewalls for both nodes and VMs.
+
+ ipset
+
+ Extension to iptables that allows creation of firewall rules
+ that match entire "sets" of IP addresses simultaneously. These
+ sets reside in indexed data structures to increase efficiency,
+ particularly on systems with a large quantity of rules.
+
+ iptables
+
+ Used along with arptables and ebtables, iptables create
+ firewalls in Compute. iptables are the tables provided by the Linux
+ kernel firewall (implemented as different Netfilter modules) and the
+ chains and rules it stores. Different kernel modules and programs are
+ currently used for different protocols: iptables applies to IPv4,
+ ip6tables to IPv6, arptables to ARP, and ebtables to Ethernet frames.
+ Requires root privilege to manipulate.
+
+ ironic
+
+ Codename for the :term:`Bare Metal service `.
+
+ iSCSI Qualified Name (IQN)
+
+ IQN is the format most commonly used for iSCSI names, which uniquely
+ identify nodes in an iSCSI network.
+ All IQNs follow the pattern iqn.yyyy-mm.domain:identifier, where
+ 'yyyy-mm' is the year and month in which the domain was registered,
+ 'domain' is the reversed domain name of the issuing organization, and
+ 'identifier' is an optional string which makes each IQN under the same
+ domain unique. For example, 'iqn.2015-10.org.openstack.408ae959bce1'.
+
+ ISO9660
+
+ One of the VM image disk formats supported by Image
+ service.
+
+ itsec
+
+ A default role in the Compute RBAC system that can quarantine an
+ instance in any project.
+
+J
+~
+
+.. glossary::
+
+ Java
+
+ A programming language that is used to create systems that
+ involve more than one computer by way of a network.
+
+ JavaScript
+
+ A scripting language that is used to build web pages.
+
+ JavaScript Object Notation (JSON)
+
+ One of the supported response formats in OpenStack.
+
+ jumbo frame
+
+ Feature in modern Ethernet networks that supports frames up to
+ approximately 9000 bytes.
+
+ Juno
+
+ The code name for the tenth release of OpenStack. The
+ design summit took place in Atlanta, Georgia, US and Juno is
+ an unincorporated community in Georgia.
+
+K
+~
+
+.. glossary::
+
+ Kerberos
+
+ A network authentication protocol which works on the basis of
+ tickets. Kerberos allows nodes to communicate over a non-secure
+ network and to prove their identity to one another in a
+ secure manner.
+
+ kernel-based VM (KVM)
+
+ An OpenStack-supported hypervisor. KVM is a full
+ virtualization solution for Linux on x86 hardware containing
+ virtualization extensions (Intel VT or AMD-V), ARM, IBM
+ Power, and IBM zSeries. It consists of a loadable kernel
+ module that provides the core virtualization infrastructure,
+ and a processor-specific module.
+
+ Key Manager service (barbican)
+
+ The project that produces a secret storage and
+ generation system capable of providing key management for
+ services wishing to enable encryption features.
+
+ keystone
+
+ Codename of the :term:`Identity service `.
+
+ Kickstart
+
+ A tool to automate system configuration and installation on Red
+ Hat, Fedora, and CentOS-based Linux distributions.
+
+ Kilo
+
+ The code name for the eleventh release of OpenStack. The
+ design summit took place in Paris, France. Due to delays in the name
+ selection, the release was known only as K. 
Because ``k`` is the + unit symbol for kilo and the kilogram reference artifact is stored + near Paris in the Pavillon de Breteuil in Sèvres, the community + chose Kilo as the release name. + +L +~ + +.. glossary:: + + large object + + An object within Object Storage that is larger than 5 GB. + + Launchpad + + The collaboration site for OpenStack. + + Layer-2 (L2) agent + + OpenStack Networking agent that provides layer-2 + connectivity for virtual networks. + + Layer-2 network + + Term used in the OSI network architecture for the data link + layer. The data link layer is responsible for media access + control, flow control and detecting and possibly correcting + errors that may occur in the physical layer. + + Layer-3 (L3) agent + + OpenStack Networking agent that provides layer-3 + (routing) services for virtual networks. + + Layer-3 network + + Term used in the OSI network architecture for the network + layer. The network layer is responsible for packet + forwarding including routing from one node to another. + + Liberty + + The code name for the twelfth release of OpenStack. The + design summit took place in Vancouver, Canada and Liberty is + the name of a village in the Canadian province of + Saskatchewan. + + libvirt + + Virtualization API library used by OpenStack to interact with + many of its supported hypervisors. + + Lightweight Directory Access Protocol (LDAP) + + An application protocol for accessing and maintaining distributed + directory information services over an IP network. + + Linux + + Unix-like computer operating system assembled under the model of + free and open-source software development and distribution. + + Linux bridge + + Software that enables multiple VMs to share a single physical + NIC within Compute. + + Linux Bridge neutron plug-in + + Enables a Linux bridge to understand a Networking port, + interface attachment, and other abstractions. + + Linux containers (LXC) + + An OpenStack-supported hypervisor. + + live migration + + The ability within Compute to move running virtual machine + instances from one host to another with only a small service + interruption during switchover. + + load balancer + + A load balancer is a logical device that belongs to a cloud + account. It is used to distribute workloads between multiple back-end + systems or services, based on the criteria defined as part of its + configuration. + + load balancing + + The process of spreading client requests between two or more + nodes to improve performance and availability. + + Load-Balancer-as-a-Service (LBaaS) + + Enables Networking to distribute incoming requests evenly + between designated instances. + + Load-balancing service (octavia) + + The project that aims to provide scalable, on demand, self service + access to load-balancer services, in technology-agnostic manner. + + Logical Volume Manager (LVM) + + Provides a method of allocating space on mass-storage + devices that is more flexible than conventional partitioning + schemes. + +M +~ + +.. glossary:: + + magnum + + Code name for the :term:`Containers Infrastructure Management + service`. + + management API + + Alternative term for an admin API. + + management network + + A network segment used for administration, not accessible to the + public Internet. + + manager + + Logical groupings of related code, such as the Block Storage + volume manager or network manager. + + manifest + + Used to track segments of a large object within Object + Storage. 
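+
+ For example, the ``swift`` client can split a large file into segments
+ and create the manifest in one step; the container name, file name, and
+ 1 GB segment size below are illustrative only:
+
+ .. code-block:: console
+
+    $ swift upload demo-container large-archive.tar --segment-size 1073741824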
+
+ manifest object
+
+ A special Object Storage object that contains the manifest for a
+ large object.
+
+ manila
+
+ Codename for OpenStack :term:`Shared File Systems service`.
+
+ manila-share
+
+ Responsible for managing Shared File Systems service devices,
+ specifically the back-end devices.
+
+ maximum transmission unit (MTU)
+
+ Maximum frame or packet size for a particular network
+ medium. Typically 1500 bytes for Ethernet networks.
+
+ mechanism driver
+
+ A driver for the Modular Layer 2 (ML2) neutron plug-in that
+ provides layer-2 connectivity for virtual instances. A
+ single OpenStack installation can use multiple mechanism
+ drivers.
+
+ melange
+
+ Project name for OpenStack Network Information Service. To be
+ merged with Networking.
+
+ membership
+
+ The association between an Image service VM image and a project.
+ Enables images to be shared with specified projects.
+
+ membership list
+
+ A list of projects that can access a given VM image within Image
+ service.
+
+ memcached
+
+ A distributed memory object caching system that is used by
+ Object Storage for caching.
+
+ memory overcommit
+
+ The ability to start new VM instances based on the actual memory
+ usage of a host, as opposed to basing the decision on the amount of
+ RAM each running instance thinks it has available. Also known as RAM
+ overcommit.
+
+ message broker
+
+ The software package used to provide AMQP messaging capabilities
+ within Compute. Default package is RabbitMQ.
+
+ message bus
+
+ The main virtual communication line used by all AMQP messages
+ for inter-cloud communications within Compute.
+
+ message queue
+
+ Passes requests from clients to the appropriate workers and
+ returns the output to the client after the job completes.
+
+ Message service (zaqar)
+
+ The project that provides a messaging service that affords a
+ variety of distributed application patterns in an efficient,
+ scalable and highly available manner, and creates and maintains
+ associated Python libraries and documentation.
+
+ Meta-Data Server (MDS)
+
+ Stores CephFS metadata.
+
+ Metadata agent
+
+ OpenStack Networking agent that provides metadata
+ services for instances.
+
+ migration
+
+ The process of moving a VM instance from one host to
+ another.
+
+ mistral
+
+ Code name for :term:`Workflow service `.
+
+ Mitaka
+
+ The code name for the thirteenth release of OpenStack.
+ The design summit took place in Tokyo, Japan. Mitaka
+ is a city in Tokyo.
+
+ Modular Layer 2 (ML2) neutron plug-in
+
+ Can concurrently use multiple layer-2 networking technologies,
+ such as 802.1Q and VXLAN, in Networking.
+
+ monasca
+
+ Codename for OpenStack :term:`Monitoring `.
+
+ Monitor (LBaaS)
+
+ LBaaS feature that provides availability monitoring using the
+ ``ping`` command, TCP, and HTTP/HTTPS GET.
+
+ Monitor (Mon)
+
+ A Ceph component that communicates with external clients, checks
+ data state and consistency, and performs quorum functions.
+
+ Monitoring (monasca)
+
+ The OpenStack service that provides a multi-project, highly scalable,
+ performant, fault-tolerant monitoring-as-a-service solution for metrics,
+ complex event processing and logging. It aims to build an extensible
+ platform for advanced monitoring services that can be used by both
+ operators and projects to gain operational insight and visibility,
+ ensuring availability and stability.
+
+ multi-factor authentication
+
+ Authentication method that uses two or more credentials, such as
+ a password and a private key. Currently not supported in
+ Identity.
+ + multi-host + + High-availability mode for legacy (nova) networking. + Each compute node handles NAT and DHCP and acts as a gateway + for all of the VMs on it. A networking failure on one compute + node doesn't affect VMs on other compute nodes. + + multinic + + Facility in Compute that allows each virtual machine instance to + have more than one VIF connected to it. + + murano + + Codename for the :term:`Application Catalog service `. + +N +~ + +.. glossary:: + + Nebula + + Released as open source by NASA in 2010 and is the basis for + Compute. + + netadmin + + One of the default roles in the Compute RBAC system. Enables the + user to allocate publicly accessible IP addresses to instances and + change firewall rules. + + NetApp volume driver + + Enables Compute to communicate with NetApp storage devices + through the NetApp OnCommand + Provisioning Manager. + + network + + A virtual network that provides connectivity between entities. + For example, a collection of virtual ports that share network + connectivity. In Networking terminology, a network is always a layer-2 + network. + + Network Address Translation (NAT) + + Process of modifying IP address information while in transit. + Supported by Compute and Networking. + + network controller + + A Compute daemon that orchestrates the network configuration of + nodes, including IP addresses, VLANs, and bridging. Also manages + routing for both public and private networks. + + Network File System (NFS) + + A method for making file systems available over the network. + Supported by OpenStack. + + network ID + + Unique ID assigned to each network segment within Networking. + Same as network UUID. + + network manager + + The Compute component that manages various network components, + such as firewall rules, IP address allocation, and so on. + + network namespace + + Linux kernel feature that provides independent virtual + networking instances on a single host with separate routing + tables and interfaces. Similar to virtual routing and forwarding + (VRF) services on physical network equipment. + + network node + + Any compute node that runs the network worker daemon. + + network segment + + Represents a virtual, isolated OSI layer-2 subnet in + Networking. + + Network Service Header (NSH) + + Provides a mechanism for metadata exchange along the + instantiated service path. + + Network Time Protocol (NTP) + + Method of keeping a clock for a host or node correct via + communication with a trusted, accurate time source. + + network UUID + + Unique ID for a Networking network segment. + + network worker + + The ``nova-network`` worker daemon; provides + services such as giving an IP address to a booting nova + instance. + + Networking API (Neutron API) + + API used to access OpenStack Networking. Provides an extensible + architecture to enable custom plug-in creation. + + Networking service (neutron) + + The OpenStack project which implements services and associated + libraries to provide on-demand, scalable, and technology-agnostic + network abstraction. + + neutron + + Codename for OpenStack :term:`Networking service `. + + neutron API + + An alternative name for :term:`Networking API `. + + neutron manager + + Enables Compute and Networking integration, which enables + Networking to perform network management for guest VMs. + + neutron plug-in + + Interface within Networking that enables organizations to create + custom plug-ins for advanced features, such as QoS, ACLs, or + IDS. 
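+
+ Plug-ins are enabled through the Networking configuration; as a
+ minimal sketch, ``neutron.conf`` can select the ML2 core plug-in and
+ the router service plug-in (values vary by deployment):
+
+ .. code-block:: ini
+
+    [DEFAULT]
+    core_plugin = ml2
+    service_plugins = router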
+
+ Newton
+
+ The code name for the fourteenth release of OpenStack. The
+ design summit took place in Austin, Texas, US. The
+ release is named after "Newton House", which is located at
+ 1013 E. Ninth St., Austin, TX, and is listed on the
+ National Register of Historic Places.
+
+ Nexenta volume driver
+
+ Provides support for NexentaStor devices in Compute.
+
+ NFV Orchestration Service (tacker)
+
+ OpenStack service that aims to implement Network Function Virtualization
+ (NFV) orchestration services and libraries for end-to-end life-cycle
+ management of network services and Virtual Network Functions (VNFs).
+
+ Nginx
+
+ An HTTP and reverse proxy server, a mail proxy server, and a generic
+ TCP/UDP proxy server.
+
+ No ACK
+
+ Disables server-side message acknowledgment in the Compute
+ RabbitMQ. Increases performance but decreases reliability.
+
+ node
+
+ A VM instance that runs on a host.
+
+ non-durable exchange
+
+ Message exchange that is cleared when the service restarts. Its
+ data is not written to persistent storage.
+
+ non-durable queue
+
+ Message queue that is cleared when the service restarts. Its
+ data is not written to persistent storage.
+
+ non-persistent volume
+
+ Alternative term for an ephemeral volume.
+
+ north-south traffic
+
+ Network traffic between a user or client (north) and a
+ server (south), or traffic into the cloud (south) and
+ out of the cloud (north). See also east-west traffic.
+
+ nova
+
+ Codename for OpenStack :term:`Compute service `.
+
+ Nova API
+
+ Alternative term for the :term:`Compute API `.
+
+ nova-network
+
+ A Compute component that manages IP address allocation,
+ firewalls, and other network-related tasks. This is the legacy
+ networking option and an alternative to Networking.
+
+O
+~
+
+.. glossary::
+
+ object
+
+ A BLOB of data held by Object Storage; can be in any
+ format.
+
+ object auditor
+
+ Opens all objects for an object server and verifies the MD5
+ hash, size, and metadata for each object.
+
+ object expiration
+
+ A configurable option within Object Storage to automatically
+ delete objects after a specified amount of time has passed or a
+ certain date is reached.
+
+ object hash
+
+ Unique ID for an Object Storage object.
+
+ object path hash
+
+ Used by Object Storage to determine the location of an object in
+ the ring. Maps objects to partitions.
+
+ object replicator
+
+ An Object Storage component that copies an object to remote
+ partitions for fault tolerance.
+
+ object server
+
+ An Object Storage component that is responsible for managing
+ objects.
+
+ Object Storage API
+
+ API used to access OpenStack :term:`Object Storage`.
+
+ Object Storage Device (OSD)
+
+ The Ceph storage daemon.
+
+ Object Storage service (swift)
+
+ The OpenStack core project that provides eventually consistent
+ and redundant storage and retrieval of fixed digital content.
+
+ object versioning
+
+ Allows a user to set a flag on an :term:`Object Storage` container
+ so that all objects within the container are versioned.
+
+ Ocata
+
+ The code name for the fifteenth release of OpenStack. The
+ design summit will take place in Barcelona, Spain. Ocata is
+ a beach north of Barcelona.
+
+ Octavia
+
+ Code name for the :term:`Load-balancing service
+ `.
+
+ Oldie
+
+ Term for an :term:`Object Storage`
+ process that runs for a long time. Can indicate a hung process.
+
+ Open Cloud Computing Interface (OCCI)
+
+ A standardized interface for managing compute, data, and network
+ resources, currently unsupported in OpenStack.
+ + Open Virtualization Format (OVF) + + Standard for packaging VM images. Supported in OpenStack. + + Open vSwitch + + Open vSwitch is a production quality, multilayer virtual + switch licensed under the open source Apache 2.0 license. It + is designed to enable massive network automation through + programmatic extension, while still supporting standard + management interfaces and protocols (for example NetFlow, + sFlow, SPAN, RSPAN, CLI, LACP, 802.1ag). + + Open vSwitch (OVS) agent + + Provides an interface to the underlying Open vSwitch service for + the Networking plug-in. + + Open vSwitch neutron plug-in + + Provides support for Open vSwitch in Networking. + + OpenLDAP + + An open source LDAP server. Supported by both Compute and + Identity. + + OpenStack + + OpenStack is a cloud operating system that controls large pools + of compute, storage, and networking resources throughout a data + center, all managed through a dashboard that gives administrators + control while empowering their users to provision resources through a + web interface. OpenStack is an open source project licensed under the + Apache License 2.0. + + OpenStack code name + + Each OpenStack release has a code name. Code names ascend in + alphabetical order: Austin, Bexar, Cactus, Diablo, Essex, + Folsom, Grizzly, Havana, Icehouse, Juno, Kilo, Liberty, + Mitaka, Newton, Ocata, Pike, Queens, and Rocky. + Code names are cities or counties near where the + corresponding OpenStack design summit took place. An + exception, called the Waldon exception, is granted to + elements of the state flag that sound especially cool. Code + names are chosen by popular vote. + + openSUSE + + A Linux distribution that is compatible with OpenStack. + + operator + + The person responsible for planning and maintaining an OpenStack + installation. + + optional service + + An official OpenStack service defined as optional by + DefCore Committee. Currently, consists of + Dashboard (horizon), Telemetry service (Telemetry), + Orchestration service (heat), Database service (trove), + Bare Metal service (ironic), and so on. + + Orchestration service (heat) + + The OpenStack service which orchestrates composite cloud + applications using a declarative template format through + an OpenStack-native REST API. + + orphan + + In the context of Object Storage, this is a process that is not + terminated after an upgrade, restart, or reload of the service. + + Oslo + + Codename for the :term:`Common Libraries project`. + +P +~ + +.. glossary:: + + panko + + Part of the OpenStack :term:`Telemetry service `; provides event storage. + + parent cell + + If a requested resource, such as CPU time, disk storage, or + memory, is not available in the parent cell, the request is forwarded + to associated child cells. + + partition + + A unit of storage within Object Storage used to store objects. + It exists on top of devices and is replicated for fault + tolerance. + + partition index + + Contains the locations of all Object Storage partitions within + the ring. + + partition shift value + + Used by Object Storage to determine which partition data should + reside on. + + path MTU discovery (PMTUD) + + Mechanism in IP networks to detect end-to-end MTU and adjust + packet size accordingly. + + pause + + A VM state where no changes occur (no changes in memory, network + communications stop, etc); the VM is frozen but not shut down. + + PCI passthrough + + Gives guest VMs exclusive access to a PCI device. Currently + supported in OpenStack Havana and later releases. 
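+
+ As a hedged sketch, PCI passthrough is typically requested through a
+ flavor property that references a PCI alias defined by the operator in
+ ``nova.conf``; the alias name ``gpu`` here is an assumption:
+
+ .. code-block:: console
+
+    $ openstack flavor set m1.large --property "pci_passthrough:alias"="gpu:1"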
+ + persistent message + + A message that is stored both in memory and on disk. The message + is not lost after a failure or restart. + + persistent volume + + Changes to these types of disk volumes are saved. + + personality file + + A file used to customize a Compute instance. It can be used to + inject SSH keys or a specific network configuration. + + Pike + + The code name for the sixteenth release of OpenStack. The design + summit will take place in Boston, Massachusetts, US. The release + is named after the Massachusetts Turnpike, abbreviated commonly + as the Mass Pike, which is the easternmost stretch of + Interstate 90. + + Platform-as-a-Service (PaaS) + + Provides to the consumer an operating system and, often, a + language runtime and libraries (collectively, the "platform") + upon which they can run their own application code, without + providing any control over the underlying infrastructure. + Examples of Platform-as-a-Service providers include Cloud Foundry + and OpenShift. + + plug-in + + Software component providing the actual implementation for + Networking APIs, or for Compute APIs, depending on the context. + + policy service + + Component of Identity that provides a rule-management + interface and a rule-based authorization engine. + + policy-based routing (PBR) + + Provides a mechanism to implement packet forwarding and routing + according to the policies defined by the network administrator. + + pool + + A logical set of devices, such as web servers, that you + group together to receive and process traffic. The load + balancing function chooses which member of the pool handles + the new requests or connections received on the VIP + address. Each VIP has one pool. + + pool member + + An application that runs on the back-end server in a + load-balancing system. + + port + + A virtual network port within Networking; VIFs / vNICs are + connected to a port. + + port UUID + + Unique ID for a Networking port. + + preseed + + A tool to automate system configuration and installation on + Debian-based Linux distributions. + + private image + + An Image service VM image that is only available to specified + projects. + + private IP address + + An IP address used for management and administration, not + available to the public Internet. + + private network + + The Network Controller provides virtual networks to enable + compute servers to interact with each other and with the public + network. All machines must have a public and private network + interface. A private network interface can be a flat or VLAN network + interface. A flat network interface is controlled by the + flat_interface with flat managers. A VLAN network interface is + controlled by the ``vlan_interface`` option with VLAN + managers. + + project + + Projects represent the base unit of “ownership” in OpenStack, + in that all resources in OpenStack should be owned by a specific project. + In OpenStack Identity, a project must be owned by a specific domain. + + project ID + + Unique ID assigned to each project by the Identity service. + + project VPN + + Alternative term for a cloudpipe. + + promiscuous mode + + Causes the network interface to pass all traffic it + receives to the host rather than passing only the frames + addressed to it. + + protected property + + Generally, extra properties on an Image service image to + which only cloud administrators have access. Limits which user + roles can perform CRUD operations on that property. The cloud + administrator can configure any image property as + protected. 
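+
+ For example, a minimal sketch of a glance property protections file;
+ the ``x_billing_code_.*`` property pattern and the role names are
+ illustrative only:
+
+ .. code-block:: ini
+
+    [x_billing_code_.*]
+    create = admin
+    read = admin,member
+    update = admin
+    delete = admin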
+ + provider + + An administrator who has access to all hosts and + instances. + + proxy node + + A node that provides the Object Storage proxy service. + + proxy server + + Users of Object Storage interact with the service through the + proxy server, which in turn looks up the location of the requested + data within the ring and returns the results to the user. + + public API + + An API endpoint used for both service-to-service communication + and end-user interactions. + + public image + + An Image service VM image that is available to all + projects. + + public IP address + + An IP address that is accessible to end-users. + + public key authentication + + Authentication method that uses keys rather than + passwords. + + public network + + The Network Controller provides virtual networks to enable + compute servers to interact with each other and with the public + network. All machines must have a public and private network + interface. The public network interface is controlled by the + ``public_interface`` option. + + Puppet + + An operating system configuration-management tool supported by + OpenStack. + + Python + + Programming language used extensively in OpenStack. + +Q +~ + +.. glossary:: + + QEMU Copy On Write 2 (QCOW2) + + One of the VM image disk formats supported by Image + service. + + Qpid + + Message queue software supported by OpenStack; an alternative to + RabbitMQ. + + Quality of Service (QoS) + + The ability to guarantee certain network or storage requirements to + satisfy a Service Level Agreement (SLA) between an application provider + and end users. + Typically includes performance requirements like networking bandwidth, + latency, jitter correction, and reliability as well as storage + performance in Input/Output Operations Per Second (IOPS), throttling + agreements, and performance expectations at peak load. + + quarantine + + If Object Storage finds objects, containers, or accounts that + are corrupt, they are placed in this state, are not replicated, cannot + be read by clients, and a correct copy is re-replicated. + + Queens + + The code name for the seventeenth release of OpenStack. The + design summit will take place in Sydney, Australia. The release + is named after the Queens Pound river in the South Coast region + of New South Wales. + + Quick EMUlator (QEMU) + + QEMU is a generic and open source machine emulator and + virtualizer. + One of the hypervisors supported by OpenStack, generally used + for development purposes. + + quota + + In Compute and Block Storage, the ability to set resource limits + on a per-project basis. + +R +~ + +.. glossary:: + + RabbitMQ + + The default message queue software used by OpenStack. + + Rackspace Cloud Files + + Released as open source by Rackspace in 2010; the basis for + Object Storage. + + RADOS Block Device (RBD) + + Ceph component that enables a Linux block device to be striped + over multiple distributed data stores. + + radvd + + The router advertisement daemon, used by the Compute VLAN + manager and FlatDHCP manager to provide routing services for VM + instances. + + rally + + Codename for the :term:`Benchmark service`. + + RAM filter + + The Compute setting that enables or disables RAM + overcommitment. + + RAM overcommit + + The ability to start new VM instances based on the actual memory + usage of a host, as opposed to basing the decision on the amount of + RAM each running instance thinks it has available. Also known as + memory overcommit. 
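+
+ For example, RAM overcommit is controlled in ``nova.conf`` by an
+ allocation ratio; the value below is illustrative, not a
+ recommendation:
+
+ .. code-block:: ini
+
+    [DEFAULT]
+    ram_allocation_ratio = 1.5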
+
+ rate limit
+
+ Configurable option within Object Storage to limit database
+ writes on a per-account and/or per-container basis.
+
+ raw
+
+ One of the VM image disk formats supported by Image service; an
+ unstructured disk image.
+
+ rebalance
+
+ The process of distributing Object Storage partitions across all
+ drives in the ring; used during initial ring creation and after ring
+ reconfiguration.
+
+ reboot
+
+ Either a soft or hard reboot of a server. With a soft reboot,
+ the operating system is signaled to restart, which enables a graceful
+ shutdown of all processes. A hard reboot is the equivalent of power
+ cycling the server. The virtualization platform should ensure that the
+ reboot action has completed successfully, even in cases in which the
+ underlying domain/VM is paused or halted/stopped.
+
+ rebuild
+
+ Removes all data on the server and replaces it with the
+ specified image. Server ID and IP addresses remain the same.
+
+ Recon
+
+ An Object Storage component that collects meters.
+
+ record
+
+ Belongs to a particular domain and is used to specify
+ information about the domain.
+ There are several types of DNS records. Each record type contains
+ particular information used to describe the purpose of that record.
+ Examples include mail exchange (MX) records, which specify the mail
+ server for a particular domain; and name server (NS) records, which
+ specify the authoritative name servers for a domain.
+
+ record ID
+
+ A number within a database that is incremented each time a
+ change is made. Used by Object Storage when replicating.
+
+ Red Hat Enterprise Linux (RHEL)
+
+ A Linux distribution that is compatible with OpenStack.
+
+ reference architecture
+
+ A recommended architecture for an OpenStack cloud.
+
+ region
+
+ A discrete OpenStack environment with dedicated API endpoints
+ that typically shares only the Identity service (keystone) with other
+ regions.
+
+ registry
+
+ Alternative term for the Image service registry.
+
+ registry server
+
+ An Image service that provides VM image metadata information to
+ clients.
+
+ Reliable, Autonomic Distributed Object Store (RADOS)
+
+ A collection of components that provides object storage within
+ Ceph. Similar to OpenStack Object Storage.
+
+ Remote Procedure Call (RPC)
+
+ The method used by the Compute RabbitMQ for intra-service
+ communications.
+
+ replica
+
+ Provides data redundancy and fault tolerance by creating copies
+ of Object Storage objects, accounts, and containers so that they are
+ not lost when the underlying storage fails.
+
+ replica count
+
+ The number of replicas of the data in an Object Storage
+ ring.
+
+ replication
+
+ The process of copying data to a separate physical device for
+ fault tolerance and performance.
+
+ replicator
+
+ The Object Storage back-end process that creates and manages
+ object replicas.
+
+ request ID
+
+ Unique ID assigned to each request sent to Compute.
+
+ rescue image
+
+ A special type of VM image that is booted when an instance is
+ placed into rescue mode. Allows an administrator to mount the file
+ systems for an instance to correct the problem.
+
+ resize
+
+ Converts an existing server to a different flavor, which scales
+ the server up or down. The original server is saved to enable rollback
+ if a problem occurs. All resizes must be tested and explicitly
+ confirmed, at which time the original server is removed.
+
+ RESTful
+
+ A kind of web service API that uses REST, or Representational
+ State Transfer. REST is the style of architecture for hypermedia
+ systems that is used for the World Wide Web.
+
+ ring
+
+ An entity that maps Object Storage data to partitions. A
+ separate ring exists for each service, such as account, object, and
+ container.
+
+ ring builder
+
+ Builds and manages rings within Object Storage, assigns
+ partitions to devices, and pushes the configuration to other storage
+ nodes.
+
+ Rocky
+
+ The code name for the eighteenth release of OpenStack. The
+ design summit will take place in Vancouver, Canada. The release
+ is named after the Rocky Mountains.
+
+ role
+
+ A personality that a user assumes to perform a specific set of
+ operations. A role includes a set of rights and privileges. A user
+ assuming that role inherits those rights and privileges.
+
+ Role Based Access Control (RBAC)
+
+ Provides a predefined list of actions that the user can perform,
+ such as start or stop VMs, reset passwords, and so on. Supported in
+ both Identity and Compute and can be configured using the dashboard.
+
+ role ID
+
+ Alphanumeric ID assigned to each Identity service role.
+
+ Root Cause Analysis (RCA) service (Vitrage)
+
+ OpenStack project that aims to organize, analyze and visualize OpenStack
+ alarms and events, yield insights regarding the root cause of problems
+ and deduce their existence before they are directly detected.
+
+ rootwrap
+
+ A feature of Compute that allows the unprivileged "nova" user to
+ run a specified list of commands as the Linux root user.
+
+ round-robin scheduler
+
+ Type of Compute scheduler that evenly distributes instances
+ among available hosts.
+
+ router
+
+ A physical or virtual network device that passes network
+ traffic between different networks.
+
+ routing key
+
+ The Compute direct exchanges, fanout exchanges, and topic
+ exchanges use this key to determine how to process a message;
+ processing varies depending on exchange type.
+
+ RPC driver
+
+ Modular system that allows the underlying message queue software
+ of Compute to be changed. For example, from RabbitMQ to ZeroMQ or
+ Qpid.
+
+ rsync
+
+ Used by Object Storage to push object replicas.
+
+ RXTX cap
+
+ Absolute limit on the amount of network traffic a Compute VM
+ instance can send and receive.
+
+ RXTX quota
+
+ Soft limit on the amount of network traffic a Compute VM
+ instance can send and receive.
+
+S
+~
+
+.. glossary::
+
+ sahara
+
+ Codename for the :term:`Data Processing service`.
+
+ SAML assertion
+
+ Contains information about a user as provided by the identity
+ provider. It is an indication that a user has been authenticated.
+
+ scheduler manager
+
+ A Compute component that determines where VM instances should
+ start. Uses modular design to support a variety of scheduler
+ types.
+
+ scoped token
+
+ An Identity service API access token that is associated with a
+ specific project.
+
+ scrubber
+
+ Checks for and deletes unused VMs; the component of Image
+ service that implements delayed delete.
+
+ secret key
+
+ String of text known only by the user; used along with an access
+ key to make requests to the Compute API.
+
+ secure boot
+
+ Process whereby the system firmware validates the authenticity of
+ the code involved in the boot process.
+
+ secure shell (SSH)
+
+ Open source tool used to access remote hosts through an
+ encrypted communications channel. SSH key injection is supported by
+ Compute.
+
+ security group
+
+ A set of network traffic filtering rules that are applied to a
+ Compute instance.
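+
+ For example, a security group that admits HTTP traffic can be created
+ and applied with the ``openstack`` client; the group, port, and server
+ names below are illustrative only:
+
+ .. code-block:: console
+
+    $ openstack security group create web
+    $ openstack security group rule create --protocol tcp --dst-port 80 web
+    $ openstack server add security group demo-server web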
+ + segmented object + + An Object Storage large object that has been broken up into + pieces. The re-assembled object is called a concatenated + object. + + self-service + + For IaaS, ability for a regular (non-privileged) account to + manage a virtual infrastructure component such as networks without + involving an administrator. + + SELinux + + Linux kernel security module that provides the mechanism for + supporting access control policies. + + senlin + + Code name for the :term:`Clustering service + `. + + server + + Computer that provides explicit services to the client software + running on that system, often managing a variety of computer + operations. + A server is a VM instance in the Compute system. Flavor and + image are requisite elements when creating a server. + + server image + + Alternative term for a VM image. + + server UUID + + Unique ID assigned to each guest VM instance. + + service + + An OpenStack service, such as Compute, Object Storage, or Image + service. Provides one or more endpoints through which users can access + resources and perform operations. + + service catalog + + Alternative term for the Identity service catalog. + + Service Function Chain (SFC) + + For a given service, SFC is the abstracted view of the required + service functions and the order in which they are to be applied. + + service ID + + Unique ID assigned to each service that is available in the + Identity service catalog. + + Service Level Agreement (SLA) + + Contractual obligations that ensure the availability of a + service. + + service project + + Special project that contains all services that are listed in the + catalog. + + service provider + + A system that provides services to other system entities. In + case of federated identity, OpenStack Identity is the service + provider. + + service registration + + An Identity service feature that enables services, such as + Compute, to automatically register with the catalog. + + service token + + An administrator-defined token used by Compute to communicate + securely with the Identity service. + + session back end + + The method of storage used by horizon to track client sessions, + such as local memory, cookies, a database, or memcached. + + session persistence + + A feature of the load-balancing service. It attempts to force + subsequent connections to a service to be redirected to the same node + as long as it is online. + + session storage + + A horizon component that stores and tracks client session + information. Implemented through the Django sessions framework. + + share + + A remote, mountable file system in the context of the :term:`Shared + File Systems service`. You can + mount a share to, and access a share from, several hosts by several + users at a time. + + share network + + An entity in the context of the :term:`Shared File Systems + service` that encapsulates + interaction with the Networking service. If the driver you selected + runs in the mode requiring such kind of interaction, you need to + specify the share network to create a share. + + Shared File Systems API + + A Shared File Systems service that provides a stable RESTful API. + The service authenticates and routes requests throughout the Shared + File Systems service. There is python-manilaclient to interact with + the API. 
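+
+ For example, python-manilaclient provides a ``manila`` command that
+ talks to this API; the share protocol, size, and name below are
+ illustrative only:
+
+ .. code-block:: console
+
+    $ manila create NFS 1 --name demo-share
+    $ manila list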
+ + Shared File Systems service (manila) + + The service that provides a set of services for + management of shared file systems in a multi-project cloud + environment, similar to how OpenStack provides block-based storage + management through the OpenStack :term:`Block Storage service` project. + With the Shared File Systems service, you can create a remote file + system and mount the file system on your instances. You can also + read and write data from your instances to and from your file system. + + shared IP address + + An IP address that can be assigned to a VM instance within the + shared IP group. Public IP addresses can be shared across multiple + servers for use in various high-availability scenarios. When an IP + address is shared to another server, the cloud network restrictions + are modified to enable each server to listen to and respond on that IP + address. You can optionally specify that the target server network + configuration be modified. Shared IP addresses can be used with many + standard heartbeat facilities, such as keepalive, that monitor for + failure and manage IP failover. + + shared IP group + + A collection of servers that can share IPs with other members of + the group. Any server in a group can share one or more public IPs with + any other server in the group. With the exception of the first server + in a shared IP group, servers must be launched into shared IP groups. + A server may be a member of only one shared IP group. + + shared storage + + Block storage that is simultaneously accessible by multiple + clients, for example, NFS. + + Sheepdog + + Distributed block storage system for QEMU, supported by + OpenStack. + + Simple Cloud Identity Management (SCIM) + + Specification for managing identity in the cloud, currently + unsupported by OpenStack. + + Simple Protocol for Independent Computing Environments (SPICE) + + SPICE provides remote desktop access to guest virtual machines. It + is an alternative to VNC. SPICE is supported by OpenStack. + + Single-root I/O Virtualization (SR-IOV) + + A specification that, when implemented by a physical PCIe + device, enables it to appear as multiple separate PCIe devices. This + enables multiple virtualized guests to share direct access to the + physical device, offering improved performance over an equivalent + virtual device. Currently supported in OpenStack Havana and later + releases. + + SmokeStack + + Runs automated tests against the core OpenStack API; written in + Rails. + + snapshot + + A point-in-time copy of an OpenStack storage volume or image. + Use storage volume snapshots to back up volumes. Use image snapshots + to back up data, or as "gold" images for additional servers. + + soft reboot + + A controlled reboot where a VM instance is properly restarted + through operating system commands. + + Software Development Lifecycle Automation service (solum) + + OpenStack project that aims to make cloud services easier to + consume and integrate with application development process + by automating the source-to-image process, and simplifying + app-centric deployment. + + Software-defined networking (SDN) + + Provides an approach for network administrators to manage computer + network services through abstraction of lower-level functionality. + + SolidFire Volume Driver + + The Block Storage driver for the SolidFire iSCSI storage + appliance. + + solum + + Code name for the :term:`Software Development Lifecycle Automation + service `. 
+ + spread-first scheduler + + The Compute VM scheduling algorithm that attempts to start a new + VM on the host with the least amount of load. + + SQLAlchemy + + An open source SQL toolkit for Python, used in OpenStack. + + SQLite + + A lightweight SQL database, used as the default persistent + storage method in many OpenStack services. + + stack + + A set of OpenStack resources created and managed by the + Orchestration service according to a given template (either an + AWS CloudFormation template or a Heat Orchestration + Template (HOT)). + + StackTach + + Community project that captures Compute AMQP communications; + useful for debugging. + + static IP address + + Alternative term for a fixed IP address. + + StaticWeb + + WSGI middleware component of Object Storage that serves + container data as a static web page. + + storage back end + + The method that a service uses for persistent storage, such as + iSCSI, NFS, or local disk. + + storage manager + + A XenAPI component that provides a pluggable interface to + support a wide variety of persistent storage back ends. + + storage manager back end + + A persistent storage method supported by XenAPI, such as iSCSI + or NFS. + + storage node + + An Object Storage node that provides container services, account + services, and object services; controls the account databases, + container databases, and object storage. + + storage services + + Collective name for the Object Storage object services, + container services, and account services. + + strategy + + Specifies the authentication source used by Image service or + Identity. In the Database service, it refers to the extensions + implemented for a data store. + + subdomain + + A domain within a parent domain. Subdomains cannot be + registered. Subdomains enable you to delegate domains. Subdomains can + themselves have subdomains, so third-level, fourth-level, fifth-level, + and deeper levels of nesting are possible. + + subnet + + Logical subdivision of an IP network. + + SUSE Linux Enterprise Server (SLES) + + A Linux distribution that is compatible with OpenStack. + + suspend + + The VM instance is paused and its state is saved to disk of the host. + + swap + + Disk-based virtual memory used by operating systems to provide + more memory than is actually available on the system. + + swauth + + An authentication and authorization service for Object Storage, + implemented through WSGI middleware; uses Object Storage itself as the + persistent backing store. + + swift + + Codename for OpenStack :term:`Object Storage service`. + + swift All in One (SAIO) + + Creates a full Object Storage development environment within a + single VM. + + swift middleware + + Collective term for Object Storage components that provide + additional functionality. + + swift proxy server + + Acts as the gatekeeper to Object Storage and is responsible for + authenticating the user. + + swift storage node + + A node that runs Object Storage account, container, and object + services. + + sync point + + Point in time since the last container and accounts database + sync among nodes within Object Storage. + + sysadmin + + One of the default roles in the Compute RBAC system. Enables a + user to add other users to a project, interact with VM images that are + associated with the project, and start and stop VM instances. + + system usage + + A Compute component that, along with the notification system, + collects meters and usage information. This information can be used + for billing. + +T +~ + +.. 
glossary:: + + tacker + + Code name for the :term:`NFV Orchestration service ` + + Telemetry service (telemetry) + + The OpenStack project which collects measurements of the utilization + of the physical and virtual resources comprising deployed clouds, + persists this data for subsequent retrieval and analysis, and triggers + actions when defined criteria are met. + + TempAuth + + An authentication facility within Object Storage that enables + Object Storage itself to perform authentication and authorization. + Frequently used in testing and development. + + Tempest + + Automated software test suite designed to run against the trunk + of the OpenStack core project. + + TempURL + + An Object Storage middleware component that enables creation of + URLs for temporary object access. + + tenant + + A group of users; used to isolate access to Compute resources. + An alternative term for a project. + + Tenant API + + An API that is accessible to projects. + + tenant endpoint + + An Identity service API endpoint that is associated with one or + more projects. + + tenant ID + + An alternative term for :term:`project ID`. + + token + + An alpha-numeric string of text used to access OpenStack APIs + and resources. + + token services + + An Identity service component that manages and validates tokens + after a user or project has been authenticated. + + tombstone + + Used to mark Object Storage objects that have been + deleted; ensures that the object is not updated on another node after + it has been deleted. + + topic publisher + + A process that is created when a RPC call is executed; used to + push the message to the topic exchange. + + Torpedo + + Community project used to run automated tests against the + OpenStack API. + + transaction ID + + Unique ID assigned to each Object Storage request; used for + debugging and tracing. + + transient + + Alternative term for non-durable. + + transient exchange + + Alternative term for a non-durable exchange. + + transient message + + A message that is stored in memory and is lost after the server + is restarted. + + transient queue + + Alternative term for a non-durable queue. + + TripleO + + OpenStack-on-OpenStack program. The code name for the + OpenStack Deployment program. + + trove + + Codename for OpenStack :term:`Database service `. + + trusted platform module (TPM) + + Specialized microprocessor for incorporating cryptographic keys + into devices for authenticating and securing a hardware platform. + +U +~ + +.. glossary:: + + Ubuntu + + A Debian-based Linux distribution. + + unscoped token + + Alternative term for an Identity service default token. + + updater + + Collective term for a group of Object Storage components that + processes queued and failed updates for containers and objects. + + user + + In OpenStack Identity, entities represent individual API + consumers and are owned by a specific domain. In OpenStack Compute, + a user can be associated with roles, projects, or both. + + user data + + A blob of data that the user can specify when they launch + an instance. The instance can access this data through the + metadata service or config drive. + Commonly used to pass a shell script that the instance runs on boot. + + User Mode Linux (UML) + + An OpenStack-supported hypervisor. + +V +~ + +.. glossary:: + + VIF UUID + + Unique ID assigned to each Networking VIF. + + Virtual Central Processing Unit (vCPU) + + Subdivides physical CPUs. Instances can then use those + divisions. 
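+
+ For example, the number of vCPUs an instance receives is set on its
+ flavor; the flavor name and sizes below are illustrative only:
+
+ .. code-block:: console
+
+    $ openstack flavor create --vcpus 2 --ram 4096 --disk 20 m1.demo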
+ + Virtual Disk Image (VDI) + + One of the VM image disk formats supported by Image + service. + + Virtual Extensible LAN (VXLAN) + + A network virtualization technology that attempts to reduce the + scalability problems associated with large cloud computing + deployments. It uses a VLAN-like encapsulation technique to + encapsulate Ethernet frames within UDP packets. + + Virtual Hard Disk (VHD) + + One of the VM image disk formats supported by Image + service. + + virtual IP address (VIP) + + An Internet Protocol (IP) address configured on the load + balancer for use by clients connecting to a service that is load + balanced. Incoming connections are distributed to back-end nodes based + on the configuration of the load balancer. + + virtual machine (VM) + + An operating system instance that runs on top of a hypervisor. + Multiple VMs can run at the same time on the same physical + host. + + virtual network + + An L2 network segment within Networking. + + Virtual Network Computing (VNC) + + Open source GUI and CLI tools used for remote console access to + VMs. Supported by Compute. + + Virtual Network InterFace (VIF) + + An interface that is plugged into a port in a Networking + network. Typically a virtual network interface belonging to a + VM. + + virtual networking + + A generic term for virtualization of network functions + such as switching, routing, load balancing, and security using + a combination of VMs and overlays on physical network + infrastructure. + + virtual port + + Attachment point where a virtual interface connects to a virtual + network. + + virtual private network (VPN) + + Provided by Compute in the form of cloudpipes, specialized + instances that are used to create VPNs on a per-project basis. + + virtual server + + Alternative term for a VM or guest. + + virtual switch (vSwitch) + + Software that runs on a host or node and provides the features + and functions of a hardware-based network switch. + + virtual VLAN + + Alternative term for a virtual network. + + VirtualBox + + An OpenStack-supported hypervisor. + + Vitrage + + Code name for the :term:`Root Cause Analysis service `. + + VLAN manager + + A Compute component that provides dnsmasq and radvd and sets up + forwarding to and from cloudpipe instances. + + VLAN network + + The Network Controller provides virtual networks to enable + compute servers to interact with each other and with the public + network. All machines must have a public and private network + interface. A VLAN network is a private network interface, which is + controlled by the ``vlan_interface`` option with VLAN + managers. + + VM disk (VMDK) + + One of the VM image disk formats supported by Image + service. + + VM image + + Alternative term for an image. + + VM Remote Control (VMRC) + + Method to access VM instance consoles using a web browser. + Supported by Compute. + + VMware API + + Supports interaction with VMware products in Compute. + + VMware NSX Neutron plug-in + + Provides support for VMware NSX in Neutron. + + VNC proxy + + A Compute component that provides users access to the consoles + of their VM instances through VNC or VMRC. + + volume + + Disk-based data storage generally represented as an iSCSI target + with a file system that supports extended attributes; can be + persistent or ephemeral. + + Volume API + + Alternative name for the Block Storage API. + + volume controller + + A Block Storage component that oversees and coordinates storage + volume actions. + + volume driver + + Alternative term for a volume plug-in. 
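+
+ For example, a volume driver is selected per back end in
+ ``cinder.conf``; this minimal sketch shows the LVM driver and assumes
+ a ``cinder-volumes`` volume group exists:
+
+ .. code-block:: ini
+
+    [lvm]
+    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
+    volume_group = cinder-volumes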
+ + volume ID + + Unique ID applied to each storage volume under the Block Storage + control. + + volume manager + + A Block Storage component that creates, attaches, and detaches + persistent storage volumes. + + volume node + + A Block Storage node that runs the cinder-volume daemon. + + volume plug-in + + Provides support for new and specialized types of back-end + storage for the Block Storage volume manager. + + volume worker + + A cinder component that interacts with back-end storage to manage + the creation and deletion of volumes and the creation of compute + volumes, provided by the cinder-volume daemon. + + vSphere + + An OpenStack-supported hypervisor. + +W +~ + +.. glossary:: + + Watcher + + Code name for the :term:`Infrastructure Optimization service + `. + + weight + + Used by Object Storage devices to determine which storage + devices are suitable for the job. Devices are weighted by size. + + weighted cost + + The sum of each cost used when deciding where to start a new VM + instance in Compute. + + weighting + + A Compute process that determines the suitability of the VM + instances for a job for a particular host. For example, not enough RAM + on the host, too many CPUs on the host, and so on. + + worker + + A daemon that listens to a queue and carries out tasks in + response to messages. For example, the cinder-volume worker manages volume + creation and deletion on storage arrays. + + Workflow service (mistral) + + The OpenStack service that provides a simple YAML-based language to + write workflows (tasks and transition rules) and a service that + allows to upload them, modify, run them at scale and in a highly + available manner, manage and monitor workflow execution state and state + of individual tasks. + +X +~ + +.. glossary:: + + X.509 + + X.509 is the most widely used standard for defining digital + certificates. It is a data structure that contains the subject + (entity) identifiable information such as its name along with + its public key. The certificate can contain a few other + attributes as well depending upon the version. The most recent + and standard version of X.509 is v3. + + Xen + + Xen is a hypervisor using a microkernel design, providing + services that allow multiple computer operating systems to + execute on the same computer hardware concurrently. + + Xen API + + The Xen administrative API, which is supported by + Compute. + + Xen Cloud Platform (XCP) + + An OpenStack-supported hypervisor. + + Xen Storage Manager Volume Driver + + A Block Storage volume plug-in that enables communication with + the Xen Storage Manager API. + + XenServer + + An OpenStack-supported hypervisor. + + XFS + + High-performance 64-bit file system created by Silicon + Graphics. Excels in parallel I/O operations and data + consistency. + +Z +~ + +.. glossary:: + + zaqar + + Codename for the :term:`Message service `. + + ZeroMQ + + Message queue software supported by OpenStack. An alternative to + RabbitMQ. Also spelled 0MQ. + + Zuul + + Tool used in OpenStack development to ensure correctly ordered + testing of changes in parallel. diff --git a/doc/common/source/conf.py b/doc/common/source/conf.py new file mode 100644 index 00000000..b8c87846 --- /dev/null +++ b/doc/common/source/conf.py @@ -0,0 +1,110 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import os +# import sys + + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = ['openstackdocstheme'] + +# Add any paths that contain templates here, relative to this directory. +# templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +repository_name = "openstack/openstack-manuals" +bug_project = 'openstack-manuals' +project = u'Common documents' +bug_tag = u'common' + +copyright = u'2015-2018, OpenStack contributors' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '' +# The full version, including alpha/beta/rc tags. +release = '' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. 
+# keep_warnings = False + +# -- Options for Internationalization output ------------------------------ +locale_dirs = ['locale/'] diff --git a/doc/source/common b/doc/source/common new file mode 120000 index 00000000..60d3b0a6 --- /dev/null +++ b/doc/source/common @@ -0,0 +1 @@ +../common \ No newline at end of file diff --git a/doc/source/compute-node-ha.rst b/doc/source/compute-node-ha.rst new file mode 100644 index 00000000..b1cf30bc --- /dev/null +++ b/doc/source/compute-node-ha.rst @@ -0,0 +1,55 @@ +============================ +Configuring the compute node +============================ + +The `Installation Guides +`_ +provide instructions for installing multiple compute nodes. +To make the compute nodes highly available, you must configure the +environment to include multiple instances of the API and other services. + +Configuring high availability for instances +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +As of September 2016, the OpenStack High Availability community is +designing and developing an official and unified way to provide high +availability for instances. We are developing automatic +recovery from failures of hardware or hypervisor-related software on +the compute node, or other failures that could prevent instances from +functioning correctly, such as, issues with a cinder volume I/O path. + +More details are available in the `user story +`_ +co-authored by OpenStack's HA community and `Product Working Group +`_ (PWG), where this feature is +identified as missing functionality in OpenStack, which +should be addressed with high priority. + +Existing solutions +~~~~~~~~~~~~~~~~~~ + +The architectural challenges of instance HA and several currently +existing solutions were presented in `a talk at the Austin summit +`_, +for which `slides are also available `_. + +The code for three of these solutions can be found online at the following +links: + +* `a mistral-based auto-recovery workflow + `_, by Intel +* `masakari `_, by NTT +* `OCF RAs + `_, + as used by Red Hat and SUSE + +Current upstream work +~~~~~~~~~~~~~~~~~~~~~ + +Work is in progress on a unified approach, which combines the best +aspects of existing upstream solutions. More details are available on +`the HA VMs user story wiki +`_. + +To get involved with this work, see the section on the +:doc:`ha-community`. diff --git a/doc/source/conf.py b/doc/source/conf.py index 2d8316b1..63276d66 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -1,3 +1,16 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # This file is execfile()d with the current directory set to its # containing dir. # @@ -8,8 +21,7 @@ # serve to show the default. import os - -import openstackdocstheme +# import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the @@ -26,6 +38,15 @@ import openstackdocstheme # ones. extensions = ['openstackdocstheme'] +# Add any paths that contain templates here, relative to this directory. 
+# templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + # The master toctree document. master_doc = 'index' @@ -36,12 +57,97 @@ project = u'High Availability Guide' bug_tag = u'ha-guide' copyright = u'2016-present, OpenStack contributors' +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '' +# The full version, including alpha/beta/rc tags. +release = '' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['common/cli*', 'common/nova*', + 'common/get-started*', 'common/dashboard*'] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + + # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + 'display_badge': False +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [openstackdocstheme.get_html_theme_path()] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +# html_static_path = [] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. 
These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # So that we can enable "log-a-bug" links from each output HTML page, this @@ -49,6 +155,73 @@ html_theme = 'openstackdocs' # minutes. html_last_updated_fmt = '%Y-%m-%d %H:%M' +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +html_use_index = False + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +html_show_sourcelink = False + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'ha-guide' + +# If true, publish source files +html_copy_source = False + +# -- Options for LaTeX output --------------------------------------------- + +latex_engine = 'xelatex' + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + + # set font (TODO: different fonts for translated PDF document builds) + 'fontenc': '\\usepackage{fontspec}', + 'fontpkg': '''\ +\defaultfontfeatures{Scale=MatchLowercase} +\setmainfont{Liberation Serif} +\setsansfont{Liberation Sans} +\setmonofont[SmallCapsFont={Liberation Mono}]{Liberation Mono} +''', + + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # 'preamble': '', +} + # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). @@ -57,5 +230,63 @@ latex_documents = [ u'OpenStack contributors', 'manual'), ] +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). 
+man_pages = [
+    ('index', 'haguide', u'High Availability Guide',
+     [u'OpenStack contributors'], 1)
+]
+
+# If true, show URL addresses after external links.
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    ('index', 'HAGuide', u'High Availability Guide',
+     u'OpenStack contributors', 'HAGuide',
+     'This guide shows OpenStack operators and deployers how to configure '
+     'OpenStack to be robust and fault-tolerant.', 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+# texinfo_no_detailmenu = False
+
+# -- Options for Internationalization output ------------------------------
+locale_dirs = ['locale/']
diff --git a/doc/source/control-plane-stateful.rst b/doc/source/control-plane-stateful.rst
new file mode 100644
index 00000000..11168a15
--- /dev/null
+++ b/doc/source/control-plane-stateful.rst
@@ -0,0 +1,342 @@
+=================================
+Configuring the stateful services
+=================================
+.. to do: scope how in depth we want these sections to be
+
+Database for high availability
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Galera
+------
+
+The first step is to install the database that sits at the heart of the
+cluster. To implement high availability, run an instance of the database on
+each controller node and use Galera Cluster to provide replication between
+them. Galera Cluster is a synchronous multi-master database cluster based
+on MySQL and the InnoDB storage engine. It is a high-availability service
+that provides high system uptime, no data loss, and scalability for growth.
+
+You can achieve high availability for the OpenStack database in many
+different ways, depending on the type of database that you want to use.
+There are three implementations of Galera Cluster available to you:
+
+- `Galera Cluster for MySQL `_: The MySQL
+  reference implementation from Codership, Oy.
+- `MariaDB Galera Cluster `_: The MariaDB
+  implementation of Galera Cluster, which is commonly supported in
+  environments based on Red Hat distributions.
+- `Percona XtraDB Cluster `_: The XtraDB
+  implementation of Galera Cluster from Percona.
+
+In addition to Galera Cluster, you can also achieve high availability
+through other database options, such as PostgreSQL, which has its own
+replication system.
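+
+To make the replication concrete, the cluster members find each other
+through the ``wsrep`` options in the MySQL or MariaDB server
+configuration. The following is a minimal sketch for one node of an
+assumed three-controller cluster (the ``wsrep`` option names are
+standard Galera settings; the provider path and addresses are
+illustrative and distribution-specific):
+
+.. code-block:: ini
+
+   [mysqld]
+   # Galera requires row-based replication and InnoDB.
+   binlog_format = ROW
+   default_storage_engine = InnoDB
+   innodb_autoinc_lock_mode = 2
+
+   wsrep_provider = /usr/lib/galera/libgalera_smm.so
+   wsrep_cluster_name = "openstack_db_cluster"
+   wsrep_cluster_address = "gcomm://10.0.0.12,10.0.0.13,10.0.0.14"
+   wsrep_node_name = "controller1"
+   wsrep_node_address = "10.0.0.12"
+
+Each node sets its own ``wsrep_node_name`` and ``wsrep_node_address``,
+while ``wsrep_cluster_address`` lists every member so that a restarting
+node can rejoin the cluster through any surviving peer.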
+
+Pacemaker active/passive with HAProxy
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Replicated storage
+------------------
+
+For example: DRBD
+
+Shared storage
+--------------
+
+Messaging service for high availability
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+RabbitMQ
+--------
+
+An AMQP (Advanced Message Queuing Protocol) compliant message bus is
+required for most OpenStack components in order to coordinate the
+execution of jobs entered into the system.
+
+The most popular AMQP implementation used in OpenStack installations
+is RabbitMQ.
+
+RabbitMQ nodes fail over at both the application layer and the
+infrastructure layer.
+
+The application layer is controlled by the ``oslo.messaging``
+configuration options for multiple AMQP hosts. If the AMQP node fails,
+the application reconnects to the next one configured within the
+specified reconnect interval. The specified reconnect interval
+constitutes its SLA.
+
+At the infrastructure layer, the SLA is the time it takes the RabbitMQ
+cluster to reassemble. Several cases are possible. The Mnesia keeper
+node is the master of the corresponding Pacemaker resource for
+RabbitMQ. When it fails, the result is a full AMQP cluster downtime
+interval. Normally, its SLA is no more than several minutes. Failure
+of another node that is a slave of the corresponding Pacemaker
+resource for RabbitMQ results in no AMQP cluster downtime at all.
+
+.. until we've determined the content depth, I've transferred RabbitMQ
+   configuration below from the old HA guide (darrenc)
+
+Making the RabbitMQ service highly available involves the following steps:
+
+- :ref:`Install RabbitMQ <rabbitmq-install>`
+
+- :ref:`Configure RabbitMQ for HA queues <rabbitmq-configure>`
+
+- :ref:`Configure OpenStack services to use RabbitMQ HA queues
+  <rabbitmq-services>`
+
+.. note::
+
+   Access to RabbitMQ is not normally handled by HAProxy. Instead,
+   consumers must be supplied with the full list of hosts running
+   RabbitMQ through the ``rabbit_hosts`` option, and must turn on the
+   ``rabbit_ha_queues`` option. For more information, read the `core issue
+   `_.
+   For more detail, read the `history and solution
+   `_.
+
+.. _rabbitmq-install:
+
+Install RabbitMQ
+^^^^^^^^^^^^^^^^
+
+The commands for installing RabbitMQ are specific to the Linux distribution
+you are using.
+
+For Ubuntu or Debian:
+
+.. code-block:: console
+
+   # apt-get install rabbitmq-server
+
+For RHEL, Fedora, or CentOS:
+
+.. code-block:: console
+
+   # yum install rabbitmq-server
+
+For openSUSE:
+
+.. code-block:: console
+
+   # zypper install rabbitmq-server
+
+For SLES 12:
+
+.. code-block:: console
+
+   # zypper addrepo -f obs://Cloud:OpenStack:Kilo/SLE_12 Kilo
+   [Verify the fingerprint of the imported GPG key. See below.]
+   # zypper install rabbitmq-server
+
+.. note::
+
+   For SLES 12, the packages are signed by GPG key 893A90DAD85F9316.
+   You should verify the fingerprint of the imported GPG key before using it.
+
+   .. code-block:: none
+
+      Key ID: 893A90DAD85F9316
+      Key Name: Cloud:OpenStack OBS Project
+      Key Fingerprint: 35B34E18ABC1076D66D5A86B893A90DAD85F9316
+      Key Created: Tue Oct 8 13:34:21 2013
+      Key Expires: Thu Dec 17 13:34:21 2015
+
+For more information, see the official installation manual for the
+distribution:
+
+- `Debian and Ubuntu `_
+- `RPM based `_
+  (RHEL, Fedora, CentOS, openSUSE)
+
+.. _rabbitmq-configure:
+
+Configure RabbitMQ for HA queues
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. [TODO: This section should begin with a brief mention
+.. about what HA queues are and why they are valuable, etc]
+
+.. [TODO: replace "currently" with specific release names]
+
+.. [TODO: Does this list need to be updated? Perhaps we need a table
+.. that shows each component and the earliest release that allows it
+.. to work with HA queues.]
+
+The following components/services can work with HA queues:
+
+- OpenStack Compute
+- OpenStack Block Storage
+- OpenStack Networking
+- Telemetry
+
+Consider that, while exchanges and bindings survive the loss of individual
+nodes, queues and their messages do not, because a queue and its contents
+are located on one node. If we lose this node, we also lose the queue.
+
+Mirrored queues in RabbitMQ improve service availability, because
+mirrored queues survive the failure of individual nodes.
+
+Production servers should run (at least) three RabbitMQ servers; for
+testing and demonstration purposes, it is possible to run only two.
+In this section, we configure two nodes, called ``rabbit1`` and ``rabbit2``.
+To build a broker, ensure that all nodes have the same Erlang cookie file.
+
+.. [TODO: Should the example instead use a minimum of three nodes?]
+
+#. Stop RabbitMQ and copy the cookie from the first node to each of the
+   other node(s):
+
+   .. code-block:: console
+
+      # scp /var/lib/rabbitmq/.erlang.cookie root@NODE:/var/lib/rabbitmq/.erlang.cookie
+
+#. On each target node, verify the correct owner,
+   group, and permissions of the file :file:`.erlang.cookie`:
+
+   .. code-block:: console
+
+      # chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie
+      # chmod 400 /var/lib/rabbitmq/.erlang.cookie
+
+#. Start the message queue service on all nodes and configure it to start
+   when the system boots. On Ubuntu, it is configured by default.
+
+   On CentOS, RHEL, openSUSE, and SLES:
+
+   .. code-block:: console
+
+      # systemctl enable rabbitmq-server.service
+      # systemctl start rabbitmq-server.service
+
+#. Verify that the nodes are running:
+
+   .. code-block:: console
+
+      # rabbitmqctl cluster_status
+      Cluster status of node rabbit@NODE...
+      [{nodes,[{disc,[rabbit@NODE]}]},
+       {running_nodes,[rabbit@NODE]},
+       {partitions,[]}]
+      ...done.
+
+#. Run the following commands on each node except the first one:
+
+   .. code-block:: console
+
+      # rabbitmqctl stop_app
+      Stopping node rabbit@NODE...
+      ...done.
+      # rabbitmqctl join_cluster --ram rabbit@rabbit1
+      # rabbitmqctl start_app
+      Starting node rabbit@NODE ...
+      ...done.
+
+.. note::
+
+   The default node type is a disc node. In this guide, nodes
+   join the cluster as RAM nodes.
+
+#. Verify the cluster status:
+
+   .. code-block:: console
+
+      # rabbitmqctl cluster_status
+      Cluster status of node rabbit@NODE...
+      [{nodes,[{disc,[rabbit@rabbit1]},{ram,[rabbit@NODE]}]}, \
+       {running_nodes,[rabbit@NODE,rabbit@rabbit1]}]
+
+   If the cluster is working, you can create usernames and passwords
+   for the queues.
+
+#. To ensure that all queues except those with auto-generated names
+   are mirrored across all running nodes,
+   set the ``ha-mode`` policy key to all
+   by running the following command on one of the nodes:
+
+   .. code-block:: console
+
+      # rabbitmqctl set_policy ha-all '^(?!amq\.).*' '{"ha-mode": "all"}'
+
+More information is available in the RabbitMQ documentation:
+
+- `Highly Available Queues `_
+- `Clustering Guide `_
+
+.. note::
+
+   As another option to make RabbitMQ highly available, RabbitMQ has
+   included OCF scripts for the Pacemaker cluster resource agents since
+   version 3.5.7. These provide an active/active RabbitMQ cluster with
+   mirrored queues. For more information, see `Auto-configuration of a
+   cluster with a Pacemaker `_.
+
+.. _rabbitmq-services:
+
+Configure OpenStack services to use RabbitMQ HA queues
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Configure the OpenStack components to use at least two RabbitMQ nodes.
+
+Use these steps to configure all services using RabbitMQ:
+
+#. RabbitMQ HA cluster ``host:port`` pairs:
+
+   .. code-block:: console
+
+      rabbit_hosts=rabbit1:5672,rabbit2:5672,rabbit3:5672
+
+#. How frequently to retry connecting with RabbitMQ:
+
+   .. code-block:: console
+
+      rabbit_retry_interval=1
+
+#. How long to back off between retries when connecting to RabbitMQ:
+
+   .. code-block:: console
+
+      rabbit_retry_backoff=2
+
+#. 
Maximum retries with trying to connect to RabbitMQ (infinite by default): + + .. code-block:: console + + rabbit_max_retries=0 + +#. Use durable queues in RabbitMQ: + + .. code-block:: console + + rabbit_durable_queues=true + +#. Use HA queues in RabbitMQ (``x-ha-policy: all``): + + .. code-block:: console + + rabbit_ha_queues=true + +.. note:: + + If you change the configuration from an old set-up + that did not use HA queues, restart the service: + + .. code-block:: console + + # rabbitmqctl stop_app + # rabbitmqctl reset + # rabbitmqctl start_app + + + + + +Pacemaker active/passive +------------------------ + + + +Mirrored queues +--------------- + +Qpid +---- diff --git a/doc/source/control-plane-stateless.rst b/doc/source/control-plane-stateless.rst new file mode 100644 index 00000000..ca2731b7 --- /dev/null +++ b/doc/source/control-plane-stateless.rst @@ -0,0 +1,518 @@ +============================== +Configuring stateless services +============================== + +.. to do: scope what details we want on the following services + +API services +~~~~~~~~~~~~ + +Load-balancer +~~~~~~~~~~~~~ + +HAProxy +------- + +HAProxy provides a fast and reliable HTTP reverse proxy and load balancer +for TCP or HTTP applications. It is particularly suited for web crawling +under very high loads while needing persistence or Layer 7 processing. +It realistically supports tens of thousands of connections with recent +hardware. + +Each instance of HAProxy configures its front end to accept connections only +to the virtual IP (VIP) address. The HAProxy back end (termination +point) is a list of all the IP addresses of instances for load balancing. + +.. note:: + + Ensure your HAProxy installation is not a single point of failure, + it is advisable to have multiple HAProxy instances running. + + You can also ensure the availability by other means, using Keepalived + or Pacemaker. + +Alternatively, you can use a commercial load balancer, which is hardware +or software. We recommend a hardware load balancer as it generally has +good performance. + +For detailed instructions about installing HAProxy on your nodes, +see the HAProxy `official documentation `_. + +Configuring HAProxy +^^^^^^^^^^^^^^^^^^^ + +#. Restart the HAProxy service. + +#. Locate your HAProxy instance on each OpenStack controller in your + environment. The following is an example ``/etc/haproxy/haproxy.cfg`` + configuration file. Configure your instance using the following + configuration file, you will need a copy of it on each + controller node. + + + .. 
code-block:: none + + global + chroot /var/lib/haproxy + daemon + group haproxy + maxconn 4000 + pidfile /var/run/haproxy.pid + user haproxy + + defaults + log global + maxconn 4000 + option redispatch + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 1m + timeout server 1m + timeout check 10s + + listen dashboard_cluster + bind :443 + balance source + option tcpka + option httpchk + option tcplog + server controller1 10.0.0.12:443 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:443 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:443 check inter 2000 rise 2 fall 5 + + listen galera_cluster + bind :3306 + balance source + option mysql-check + server controller1 10.0.0.12:3306 check port 9200 inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:3306 backup check port 9200 inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:3306 backup check port 9200 inter 2000 rise 2 fall 5 + + listen glance_api_cluster + bind :9292 + balance source + option tcpka + option httpchk + option tcplog + server controller1 10.0.0.12:9292 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:9292 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:9292 check inter 2000 rise 2 fall 5 + + listen glance_registry_cluster + bind :9191 + balance source + option tcpka + option tcplog + server controller1 10.0.0.12:9191 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:9191 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:9191 check inter 2000 rise 2 fall 5 + + listen keystone_admin_cluster + bind :35357 + balance source + option tcpka + option httpchk + option tcplog + server controller1 10.0.0.12:35357 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:35357 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:35357 check inter 2000 rise 2 fall 5 + + listen keystone_public_internal_cluster + bind :5000 + balance source + option tcpka + option httpchk + option tcplog + server controller1 10.0.0.12:5000 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:5000 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:5000 check inter 2000 rise 2 fall 5 + + listen nova_ec2_api_cluster + bind :8773 + balance source + option tcpka + option tcplog + server controller1 10.0.0.12:8773 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:8773 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:8773 check inter 2000 rise 2 fall 5 + + listen nova_compute_api_cluster + bind :8774 + balance source + option tcpka + option httpchk + option tcplog + server controller1 10.0.0.12:8774 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:8774 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:8774 check inter 2000 rise 2 fall 5 + + listen nova_metadata_api_cluster + bind :8775 + balance source + option tcpka + option tcplog + server controller1 10.0.0.12:8775 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:8775 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:8775 check inter 2000 rise 2 fall 5 + + listen cinder_api_cluster + bind :8776 + balance source + option tcpka + option httpchk + option tcplog + server controller1 10.0.0.12:8776 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:8776 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:8776 check inter 2000 rise 2 fall 5 + + listen ceilometer_api_cluster + bind :8777 + balance source + option tcpka + option tcplog + server controller1 
10.0.0.12:8777 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:8777 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:8777 check inter 2000 rise 2 fall 5 + + listen nova_vncproxy_cluster + bind :6080 + balance source + option tcpka + option tcplog + server controller1 10.0.0.12:6080 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:6080 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:6080 check inter 2000 rise 2 fall 5 + + listen neutron_api_cluster + bind :9696 + balance source + option tcpka + option httpchk + option tcplog + server controller1 10.0.0.12:9696 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:9696 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:9696 check inter 2000 rise 2 fall 5 + + listen swift_proxy_cluster + bind :8080 + balance source + option tcplog + option tcpka + server controller1 10.0.0.12:8080 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:8080 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:8080 check inter 2000 rise 2 fall 5 + + .. note:: + + The Galera cluster configuration directive ``backup`` indicates + that two of the three controllers are standby nodes. + This ensures that only one node services write requests + because OpenStack support for multi-node writes is not yet production-ready. + + .. note:: + + The Telemetry API service configuration does not have the ``option httpchk`` + directive as it cannot process this check properly. + +.. TODO: explain why the Telemetry API is so special + +#. Configure the kernel parameter to allow non-local IP binding. This allows + running HAProxy instances to bind to a VIP for failover. Add following line + to ``/etc/sysctl.conf``: + + .. code-block:: none + + net.ipv4.ip_nonlocal_bind = 1 + +#. Restart the host or, to make changes work immediately, invoke: + + .. code-block:: console + + $ sysctl -p + +#. Add HAProxy to the cluster and ensure the VIPs can only run on machines + where HAProxy is active: + + ``pcs`` + + .. code-block:: console + + $ pcs resource create lb-haproxy systemd:haproxy --clone + $ pcs constraint order start vip then lb-haproxy-clone kind=Optional + $ pcs constraint colocation add lb-haproxy-clone with vip + + ``crmsh`` + + .. code-block:: console + + $ crm cib new conf-haproxy + $ crm configure primitive haproxy lsb:haproxy op monitor interval="1s" + $ crm configure clone haproxy-clone haproxy + $ crm configure colocation vip-with-haproxy inf: vip haproxy-clone + $ crm configure order haproxy-after-vip mandatory: vip haproxy-clone + + +Pacemaker versus systemd +------------------------ + +Memcached +--------- + +Memcached is a general-purpose distributed memory caching system. It +is used to speed up dynamic database-driven websites by caching data +and objects in RAM to reduce the number of times an external data +source must be read. + +Memcached is a memory cache demon that can be used by most OpenStack +services to store ephemeral data, such as tokens. + +Access to Memcached is not handled by HAProxy because replicated +access is currently in an experimental state. Instead, OpenStack +services must be supplied with the full list of hosts running +Memcached. + +The Memcached client implements hashing to balance objects among the +instances. Failure of an instance impacts only a percentage of the +objects and the client automatically removes it from the list of +instances. The SLA is several minutes. 
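+
+In practice, supplying this full list of hosts means each OpenStack
+service configuration carries every Memcached endpoint rather than a
+single VIP. The following is a minimal sketch using the oslo.cache
+options (the section and option names can vary by service and release;
+the addresses reuse the controller IPs from the HAProxy example above):
+
+.. code-block:: ini
+
+   [cache]
+   enabled = true
+   backend = oslo_cache.memcache_pool
+   memcache_servers = 10.0.0.12:11211,10.0.0.13:11211,10.0.0.14:11211
+
+Because every client hashes keys across the same server list, no load
+balancer is needed in front of Memcached.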
+ + +Highly available API services +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Identity API +------------ + +Ensure you have read the +`OpenStack Identity service getting started documentation +`_. + +.. to do: reference controller-ha-identity and see if section involving + adding to pacemaker is in scope + + +Add OpenStack Identity resource to Pacemaker +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The following section(s) detail how to add the Identity service +to Pacemaker on SUSE and Red Hat. + +SUSE +---- + +SUSE Enterprise Linux and SUSE-based distributions, such as openSUSE, +use a set of OCF agents for controlling OpenStack services. + +#. Run the following commands to download the OpenStack Identity resource + to Pacemaker: + + .. code-block:: console + + # cd /usr/lib/ocf/resource.d + # mkdir openstack + # cd openstack + # wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/keystone + # chmod a+rx * + +#. Add the Pacemaker configuration for the OpenStack Identity resource + by running the following command to connect to the Pacemaker cluster: + + .. code-block:: console + + # crm configure + +#. Add the following cluster resources: + + .. code-block:: console + + clone p_keystone ocf:openstack:keystone \ + params config="/etc/keystone/keystone.conf" os_password="secretsecret" os_username="admin" os_tenant_name="admin" os_auth_url="http://10.0.0.11:5000/v2.0/" \ + op monitor interval="30s" timeout="30s" + + .. note:: + + This configuration creates ``p_keystone``, + a resource for managing the OpenStack Identity service. + +#. Commit your configuration changes from the :command:`crm configure` menu + with the following command: + + .. code-block:: console + + # commit + + The :command:`crm configure` supports batch input. You may have to copy and + paste the above lines into your live Pacemaker configuration, and then make + changes as required. + + For example, you may enter ``edit p_ip_keystone`` from the + :command:`crm configure` menu and edit the resource to match your preferred + virtual IP address. + + Pacemaker now starts the OpenStack Identity service and its dependent + resources on all of your nodes. + +Red Hat +-------- + +For Red Hat Enterprise Linux and Red Hat-based Linux distributions, +the following process uses Systemd unit files. + +.. code-block:: console + + # pcs resource create openstack-keystone systemd:openstack-keystone --clone interleave=true + +.. _identity-config-identity: + +Configure OpenStack Identity service +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +#. Edit the :file:`keystone.conf` file + to change the values of the :manpage:`bind(2)` parameters: + + .. code-block:: ini + + bind_host = 10.0.0.12 + public_bind_host = 10.0.0.12 + admin_bind_host = 10.0.0.12 + + The ``admin_bind_host`` parameter + lets you use a private network for admin access. + +#. To be sure that all data is highly available, + ensure that everything is stored in the MySQL database + (which is also highly available): + + .. code-block:: ini + + [catalog] + driver = keystone.catalog.backends.sql.Catalog + # ... + [identity] + driver = keystone.identity.backends.sql.Identity + # ... + +#. If the Identity service will be sending ceilometer notifications + and your message bus is configured for high availability, you will + need to ensure that the Identity service is correctly configured to + use it. + +.. 
_identity-services-config: + +Configure OpenStack services to use the highly available OpenStack Identity +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Your OpenStack services now point their OpenStack Identity configuration +to the highly available virtual cluster IP address. + +#. For OpenStack Compute service, (if your OpenStack Identity service + IP address is 10.0.0.11) use the following configuration in the + :file:`api-paste.ini` file: + + .. code-block:: ini + + auth_host = 10.0.0.11 + +#. Create the OpenStack Identity Endpoint with this IP address. + + .. note:: + + If you are using both private and public IP addresses, + create two virtual IP addresses and define the endpoint. For + example: + + .. code-block:: console + + $ openstack endpoint create --region $KEYSTONE_REGION \ + $service-type public http://PUBLIC_VIP:5000/v2.0 + $ openstack endpoint create --region $KEYSTONE_REGION \ + $service-type admin http://10.0.0.11:35357/v2.0 + $ openstack endpoint create --region $KEYSTONE_REGION \ + $service-type internal http://10.0.0.11:5000/v2.0 + +#. If you are using Dashboard (horizon), edit the :file:`local_settings.py` + file to include the following: + + .. code-block:: ini + + OPENSTACK_HOST = 10.0.0.11 + + +Telemetry API +------------- + +The Telemetry polling agent can be configured to partition its polling +workload between multiple agents. This enables high availability (HA). + +Both the central and the compute agent can run in an HA deployment. +This means that multiple instances of these services can run in +parallel with workload partitioning among these running instances. + +The `Tooz `_ library provides +the coordination within the groups of service instances. +It provides an API above several back ends that can be used for building +distributed applications. + +Tooz supports +`various drivers `_ +including the following back end solutions: + +* `Zookeeper `_: + Recommended solution by the Tooz project. + +* `Redis `_: + Recommended solution by the Tooz project. + +* `Memcached `_: + Recommended for testing. + +You must configure a supported Tooz driver for the HA deployment of +the Telemetry services. + +For information about the required configuration options +to set in the :file:`ceilometer.conf`, see the `coordination section +`_ +in the OpenStack Configuration Reference. + +.. note:: + + Only one instance for the central and compute agent service(s) is able + to run and function correctly if the ``backend_url`` option is not set. + +The availability check of the instances is provided by heartbeat messages. +When the connection with an instance is lost, the workload will be +reassigned within the remaining instances in the next polling cycle. + +.. note:: + + Memcached uses a timeout value, which should always be set to + a value that is higher than the heartbeat value set for Telemetry. + +For backward compatibility and supporting existing deployments, the central +agent configuration supports using different configuration files. This is for +groups of service instances that are running in parallel. +For enabling this configuration, set a value for the +``partitioning_group_prefix`` option in the +`polling section `_ +in the OpenStack Configuration Reference. + +.. warning:: + + For each sub-group of the central agent pool with the same + ``partitioning_group_prefix``, a disjoint subset of meters must be polled + to avoid samples being missing or duplicated. 
The list of meters to poll + can be set in the :file:`/etc/ceilometer/pipeline.yaml` configuration file. + For more information about pipelines see the `Data processing and pipelines + `_ + section. + +To enable the compute agent to run multiple instances simultaneously with +workload partitioning, the ``workload_partitioning`` option must be set to +``True`` under the `compute section `_ +in the :file:`ceilometer.conf` configuration file. + + +.. To Do: Cover any other projects here with API services which require specific + HA details. diff --git a/doc/source/control-plane.rst b/doc/source/control-plane.rst new file mode 100644 index 00000000..36ede082 --- /dev/null +++ b/doc/source/control-plane.rst @@ -0,0 +1,9 @@ +=========================== +Configuring a control plane +=========================== + +.. toctree:: + :maxdepth: 2 + + control-plane-stateless.rst + control-plane-stateful.rst diff --git a/doc/source/figures/Cluster-deployment-collapsed.png b/doc/source/figures/Cluster-deployment-collapsed.png new file mode 100644 index 00000000..91feec0b Binary files /dev/null and b/doc/source/figures/Cluster-deployment-collapsed.png differ diff --git a/doc/source/figures/Cluster-deployment-segregated.png b/doc/source/figures/Cluster-deployment-segregated.png new file mode 100644 index 00000000..a504ae18 Binary files /dev/null and b/doc/source/figures/Cluster-deployment-segregated.png differ diff --git a/doc/source/ha-community.rst b/doc/source/ha-community.rst new file mode 100644 index 00000000..560b9db3 --- /dev/null +++ b/doc/source/ha-community.rst @@ -0,0 +1,15 @@ +============ +HA community +============ + +The OpenStack HA community holds `weekly IRC meetings +`_ to discuss +a range of topics relating to HA in OpenStack. Everyone interested is +encouraged to attend. The `logs of all previous meetings +`_ are available to read. + +You can contact the HA community directly in `the #openstack-ha +channel on Freenode IRC `_, or by +sending mail to the `openstack-dev +`_ +mailing list with the ``[HA]`` prefix in the ``Subject`` header. diff --git a/doc/source/index.rst b/doc/source/index.rst index b7ce2dd6..68c8b081 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -5,8 +5,31 @@ OpenStack High Availability Guide Abstract ~~~~~~~~ -This guide provides information about configuring OpenStack services for high -availability. +This guide describes how to install and configure OpenStack for high +availability. It supplements the Installation Guides +and assumes that you are familiar with the material in those guides. -This is a placeholder while we migrate information over from another repo. +.. warning:: + This guide is a work-in-progress and changing rapidly + while we continue to test and enhance the guidance. There are + open `TODO` items throughout and available on the OpenStack manuals + `bug list `_. + Please help where you are able. + +.. 
toctree::
+   :maxdepth: 1
+
+   common/conventions.rst
+   overview.rst
+   intro-ha.rst
+   intro-os-ha.rst
+   control-plane.rst
+   networking-ha.rst
+   storage-ha.rst
+   compute-node-ha.rst
+   monitoring.rst
+   testing.rst
+   ref-arch-examples.rst
+   ha-community.rst
+   common/appendix.rst
diff --git a/doc/source/intro-ha-common-tech.rst b/doc/source/intro-ha-common-tech.rst
new file mode 100644
index 00000000..7f9cae0b
--- /dev/null
+++ b/doc/source/intro-ha-common-tech.rst
@@ -0,0 +1,127 @@
+========================
+Commonly used technology
+========================
+High availability can be achieved only at the system level; both hardware
+and software components contribute to the availability of the system as a
+whole. This document lists the most common hardware and software
+technologies that can be used to build a highly available system.
+
+Hardware
+~~~~~~~~
+Using different technologies to enable high availability at the hardware
+level provides a good basis for building a highly available system. The
+following sections discuss the most common technologies used in this field.
+
+Redundant switches
+------------------
+Network switches are single points of failure because networking is
+critical to the operation of all other basic domains of the infrastructure,
+such as compute and storage. A network switch must be able to forward
+traffic, and to forward it to a working next hop.
+For these reasons, consider the following two factors when making a network
+switch redundant:
+
+#. The network switch itself should synchronize its internal state to a
+   redundant switch in either an active/active or active/passive way.
+
+#. The network topology should be designed in a way that the network router
+   can use at least two paths in every critical direction.
+
+Bonded interfaces
+-----------------
+Bonded interfaces are two independent physical network interfaces handled as
+one interface in active/passive or active/active redundancy mode. In
+active/passive mode, if an error occurs on the active network interface or
+at the remote end of the interface, the interfaces are switched over. In
+active/active mode, when an error occurs on an interface or at the remote
+end of an interface, the interface is marked as unavailable and ceases to
+be used.
+
+Load balancers
+--------------
+Physical load balancers are special routers that direct traffic based on a
+set of rules. Load balancers can be made redundant in the same way as
+physical switches.
+Load balancers are also important for distributing traffic across the
+active/active components of the system.
+
+Storage
+-------
+Physical storage high availability can be achieved at different scopes:
+
+#. High availability within a hardware unit, with redundant disks (mostly
+   organized into different RAID configurations), redundant control
+   components, redundant I/O interfaces, and redundant power supplies.
+
+#. System-level high availability, with redundant hardware units and data
+   replication.
+
+Software
+~~~~~~~~
+
+HAProxy
+-------
+
+HAProxy provides a fast and reliable HTTP reverse proxy and load balancer
+for TCP or HTTP applications. It is particularly suited for web sites with
+very high load that require persistence or Layer 7 processing.
+It realistically supports tens of thousands of connections with recent
+hardware.
+
+.. note::
+
+   Ensure that your HAProxy installation is not a single point of failure;
+   it is advisable to run multiple HAProxy instances.
+
+   You can also ensure availability by other means, such as Keepalived
+   or Pacemaker.
+
+Alternatively, you can use a commercial load balancer, which may be
+hardware-based or software-based. We recommend a hardware load balancer as
+it generally has good performance.
+
+For detailed instructions about installing HAProxy on your nodes,
+see the HAProxy `official documentation `_.
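+
+As a concrete sketch of the idea, a single ``listen`` section in
+:file:`haproxy.cfg` binds the virtual IP and spreads incoming connections
+across the back-end nodes (the addresses are illustrative; a complete
+configuration example appears in the control plane chapter):
+
+.. code-block:: none
+
+   listen example_api_cluster
+     bind 10.0.0.11:5000
+     balance source
+     option tcpka
+     server controller1 10.0.0.12:5000 check inter 2000 rise 2 fall 5
+     server controller2 10.0.0.13:5000 check inter 2000 rise 2 fall 5
+     server controller3 10.0.0.14:5000 check inter 2000 rise 2 fall 5
+
+If a back-end server fails its health check, HAProxy stops sending it
+traffic until it recovers, which removes the individual API process as a
+single point of failure.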
+
+keepalived
+----------
+
+`keepalived `_ is routing software that provides facilities
+for load balancing and high availability to Linux systems and Linux-based
+infrastructures.
+
+Keepalived implements a set of checkers to dynamically and adaptively
+maintain and manage a load-balanced server pool according to the health
+of its members.
+
+The keepalived daemon can be used to monitor services or systems and
+to automatically fail over to a standby if problems occur.
+
+Pacemaker
+---------
+
+The `Pacemaker `_ cluster stack is a state-of-the-art
+high availability and load balancing stack for the Linux platform.
+Pacemaker is used to make the OpenStack infrastructure highly available.
+
+Pacemaker relies on the
+`Corosync `_ messaging layer
+for reliable cluster communications. Corosync implements the Totem
+single-ring ordering and membership protocol. It also provides UDP and
+InfiniBand based messaging, quorum, and cluster membership to Pacemaker.
+
+Pacemaker does not inherently understand the applications it manages.
+Instead, it relies on resource agents (RAs): scripts that encapsulate
+the knowledge of how to start, stop, and check the health of each
+application managed by the cluster.
+
+These agents must conform to one of the `OCF `_,
+`SysV Init `_, Upstart, or systemd standards.
+
+Pacemaker ships with a large set of OCF agents (such as those managing
+MySQL databases, virtual IP addresses, and RabbitMQ), but can also use
+any agents already installed on your system and can be extended with
+your own (see the
+`developer guide `_).
diff --git a/doc/source/intro-ha-key-concepts.rst b/doc/source/intro-ha-key-concepts.rst
new file mode 100644
index 00000000..4a75d53b
--- /dev/null
+++ b/doc/source/intro-ha-key-concepts.rst
@@ -0,0 +1,147 @@
+============
+Key concepts
+============
+
+Redundancy and failover
+~~~~~~~~~~~~~~~~~~~~~~~
+
+High availability is implemented with redundant hardware
+running redundant instances of each service.
+If one piece of hardware running one instance of a service fails,
+the system can then fail over to use another instance of a service
+that is running on hardware that did not fail.
+
+A crucial aspect of high availability
+is the elimination of single points of failure (SPOFs).
+A SPOF is an individual piece of equipment or software
+that causes system downtime or data loss if it fails.
+In order to eliminate SPOFs, check that mechanisms exist for redundancy of:
+
+- Network components, such as switches and routers
+
+- Applications and automatic service migration
+
+- Storage components
+
+- Facility services such as power, air conditioning, and fire protection
+
+In the event that a component fails and a back-up system must take on
+its load, most high availability systems will replace the failed
+component as quickly as possible to maintain necessary redundancy. This
+way, time spent in a degraded protection state is minimized.
+
+Most high availability systems fail in the event of multiple
+independent (non-consequential) failures. In this case, most
+implementations favor protecting data over maintaining availability.
+ +High availability systems typically achieve an uptime percentage of +99.99% or more, which roughly equates to less than an hour of +cumulative downtime per year. In order to achieve this, high +availability systems should keep recovery times after a failure to +about one to two minutes, sometimes significantly less. + +OpenStack currently meets such availability requirements for its own +infrastructure services, meaning that an uptime of 99.99% is feasible +for the OpenStack infrastructure proper. However, OpenStack does not +guarantee 99.99% availability for individual guest instances. + +This document discusses some common methods of implementing highly +available systems, with an emphasis on the core OpenStack services and +other open source services that are closely aligned with OpenStack. + +You will need to address high availability concerns for any applications +software that you run on your OpenStack environment. The important thing is +to make sure that your services are redundant and available. +How you achieve that is up to you. + +Active/passive versus active/active +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Stateful services can be configured as active/passive or active/active, +which are defined as follows: + +:term:`active/passive configuration` + Maintains a redundant instance + that can be brought online when the active service fails. + For example, OpenStack writes to the main database + while maintaining a disaster recovery database that can be brought online + if the main database fails. + + A typical active/passive installation for a stateful service maintains + a replacement resource that can be brought online when required. + Requests are handled using a :term:`virtual IP address (VIP)` that + facilitates returning to service with minimal reconfiguration. + A separate application (such as Pacemaker or Corosync) monitors + these services, bringing the backup online as necessary. + +:term:`active/active configuration` + Each service also has a backup but manages both the main and + redundant systems concurrently. + This way, if there is a failure, the user is unlikely to notice. + The backup system is already online and takes on increased load + while the main system is fixed and brought back online. + + Typically, an active/active installation for a stateless service + maintains a redundant instance, and requests are load balanced using + a virtual IP address and a load balancer such as HAProxy. + + A typical active/active installation for a stateful service includes + redundant services, with all instances having an identical state. In + other words, updates to one instance of a database update all other + instances. This way a request to one instance is the same as a + request to any other. A load balancer manages the traffic to these + systems, ensuring that operational systems always handle the + request. + +Clusters and quorums +~~~~~~~~~~~~~~~~~~~~ + +The quorum specifies the minimal number of nodes +that must be functional in a cluster of redundant nodes +in order for the cluster to remain functional. +When one node fails and failover transfers control to other nodes, +the system must ensure that data and processes remain sane. +To determine this, the contents of the remaining nodes are compared +and, if there are discrepancies, a majority rules algorithm is implemented. + +For this reason, each cluster in a high availability environment should +have an odd number of nodes and the quorum is defined as more than a half +of the nodes. 
If multiple nodes fail so that the cluster size falls below the quorum
value, the cluster itself fails.

For example, in a seven-node cluster, the quorum should be set to
``floor(7/2) + 1 == 4``. If quorum is four and four nodes fail
simultaneously, the cluster itself would fail, whereas it would continue
to function if no more than three nodes fail. If the cluster is split into
partitions of three and four nodes respectively, the four-node partition
retains quorum: it continues to operate as the majority partition and stops
or fences the minority one (depending on the ``no-quorum-policy`` cluster
configuration).

For the sake of illustration, suppose the quorum had instead been
misconfigured as three.

.. note::

   We do not recommend setting the quorum to a value less than
   ``floor(n/2) + 1`` as it is likely to cause a split-brain in the face
   of network partitions.

With a quorum of three, the cluster would also continue to function when
four nodes fail simultaneously. But if the cluster is split into partitions
of three and four nodes respectively, both sides would meet the quorum of
three and attempt to fence the other and host resources. Without fencing
enabled, the cluster would go straight to running two copies of each
resource.

This is why setting the quorum to a value less than ``floor(n/2) + 1`` is
dangerous. However, it may be required for some specific cases, such as a
temporary measure at a point when it is known with 100% certainty that the
other nodes are down.

When configuring an OpenStack environment for study or demonstration
purposes, it is possible to turn off the quorum checking. Production
systems should always run with quorum enabled.

Load balancing
~~~~~~~~~~~~~~

.. to do: definition and description of need within HA

diff --git a/doc/source/intro-ha.rst b/doc/source/intro-ha.rst
new file mode 100644
index 00000000..d798c46c
--- /dev/null
+++ b/doc/source/intro-ha.rst
@@ -0,0 +1,24 @@
=================================
Introduction to high availability
=================================

High availability systems seek to minimize the following issues:

#. System downtime: Occurs when a user-facing service is unavailable
   beyond a specified maximum amount of time.

#. Data loss: Accidental deletion or destruction of data.

Most high availability systems guarantee protection against system downtime
and data loss only in the event of a single failure.
However, they are also expected to protect against cascading failures,
where a single failure deteriorates into a series of consequential failures.
Many service providers guarantee a :term:`Service Level Agreement (SLA)`
that includes an uptime percentage for their computing services, calculated
from the available time and system downtime, excluding planned outage time.

.. toctree::
   :maxdepth: 2

   intro-ha-key-concepts.rst
   intro-ha-common-tech.rst

diff --git a/doc/source/intro-os-ha-cluster.rst b/doc/source/intro-os-ha-cluster.rst
new file mode 100644
index 00000000..555ee263
--- /dev/null
+++ b/doc/source/intro-os-ha-cluster.rst
@@ -0,0 +1,67 @@
================
Cluster managers
================

At its core, a cluster is a distributed finite state machine capable
of co-ordinating the startup and recovery of inter-related services
across a set of machines.

Even a distributed or replicated application that is able to survive failures
on one or more machines can benefit from a cluster manager because a cluster
manager has the following capabilities:
#. Awareness of other applications in the stack

   While SysV init replacements such as systemd can provide
   deterministic recovery of a complex stack of services, the
   recovery is limited to one machine and lacks the context of what
   is happening on other machines. This context is crucial to
   determine the difference between a local failure, and clean startup
   and recovery after a total site failure.

#. Awareness of instances on other machines

   Services like RabbitMQ and Galera have complicated boot-up
   sequences that require co-ordination, and often serialization, of
   startup operations across all machines in the cluster. This is
   especially true after a site-wide failure or shutdown, where you must
   first determine which machine was the last one to be active.

#. A shared implementation and calculation of `quorum
   `_

   It is very important that all members of the system share the same
   view of who their peers are and whether or not they are in the
   majority. Failure to do this leads very quickly to an internal
   `split-brain `_
   state, where different parts of the system pull in different and
   incompatible directions.

#. Data integrity through fencing (a non-responsive process does not
   imply it is not doing anything)

   A single application does not have sufficient context to know the
   difference between failure of a machine and failure of the
   application on a machine. The usual practice is to assume the
   machine is dead and continue working; however, this is highly risky. A
   rogue process or machine could still be responding to requests and
   generally causing havoc. The safer approach is to make use of
   remotely accessible power switches and/or network switches and SAN
   controllers to fence (isolate) the machine before continuing.

#. Automated recovery of failed instances

   While the application can still run after the failure of several
   instances, it may not have sufficient capacity to serve the
   required volume of requests. A cluster can automatically recover
   failed instances to prevent additional load-induced failures.

Pacemaker
~~~~~~~~~

.. to do: description and point to ref arch example using pacemaker

`Pacemaker `_.

Systemd
~~~~~~~

.. to do: description and point to ref arch example using Systemd and link

diff --git a/doc/source/intro-os-ha-memcached.rst b/doc/source/intro-os-ha-memcached.rst
new file mode 100644
index 00000000..a1b3d8a1
--- /dev/null
+++ b/doc/source/intro-os-ha-memcached.rst
@@ -0,0 +1,35 @@
=========
Memcached
=========

Most OpenStack services can use Memcached to store ephemeral data such as
tokens. Although Memcached does not support typical forms of redundancy such
as clustering, OpenStack services can use almost any number of instances
by configuring multiple hostnames or IP addresses.

The Memcached client implements hashing to balance objects among the
instances. Failure of an instance impacts only a percentage of the objects,
and the client automatically removes it from the list of instances.

Installation
~~~~~~~~~~~~

To install and configure Memcached, read the
`official documentation `_.

Memory caching is managed by `oslo.cache
`_.
This ensures consistency across all projects when using multiple Memcached
servers. The following is an example configuration with three hosts:

.. code-block:: ini

   memcached_servers = controller1:11211,controller2:11211,controller3:11211

By default, ``controller1`` handles the caching service.
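The hashing behavior is easy to see with a plain client library. The
following is a hypothetical sketch using the ``python-memcached`` package
(any Memcached client configured with a server list behaves similarly):

.. code-block:: python

   import memcache

   # The client hashes each key to one of the configured servers,
   # spreading the cached objects across all three hosts.
   client = memcache.Client(
       ['controller1:11211', 'controller2:11211', 'controller3:11211'])
   client.set('token-12345', 'cached-payload', time=300)
   print(client.get('token-12345'))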
If ``controller1`` goes down, ``controller2`` or ``controller3`` takes over
the service.

For more information about Memcached installation, see the
*Environment -> Memcached* section in the
`Installation Guides `_,
depending on your distribution.

diff --git a/doc/source/intro-os-ha-state.rst b/doc/source/intro-os-ha-state.rst
new file mode 100644
index 00000000..ba7703e8
--- /dev/null
+++ b/doc/source/intro-os-ha-state.rst
@@ -0,0 +1,52 @@
==================================
Stateless versus stateful services
==================================

OpenStack components can be divided into three categories:

- OpenStack APIs: HTTP(S) stateless services written in Python that are
  easy to duplicate and mostly easy to load balance.

- The SQL relational database server, which provides a stateful service
  consumed by other components. Supported databases are MySQL, MariaDB,
  and PostgreSQL. Making the SQL database redundant is complex.

- :term:`Advanced Message Queuing Protocol (AMQP)`, which provides the
  internal stateful communication service for OpenStack.

.. to do: Ensure the difference between stateless and stateful services
.. is clear

Stateless services
~~~~~~~~~~~~~~~~~~

A stateless service provides a response to your request and then
requires no further attention. To make a stateless service highly
available, you need to provide redundant instances and load balance them.

Stateless OpenStack services
----------------------------

OpenStack services that are stateless include ``nova-api``,
``nova-conductor``, ``glance-api``, ``keystone-api``, ``neutron-api``,
and ``nova-scheduler``.

Stateful services
~~~~~~~~~~~~~~~~~

A stateful service is one where subsequent requests to the service
depend on the results of the first request.
Stateful services are more difficult to manage because a single
action typically involves more than one request. Providing
additional instances and load balancing does not solve the problem.
For example, if the horizon user interface reset itself every time
you went to a new page, it would not be very useful.
OpenStack services that are stateful include the OpenStack database
and message queue.
Making stateful services highly available can depend on whether you choose
an active/passive or active/active configuration.

Stateful OpenStack services
---------------------------

.. to do: create list of stateful services

diff --git a/doc/source/intro-os-ha.rst b/doc/source/intro-os-ha.rst
new file mode 100644
index 00000000..5613122a
--- /dev/null
+++ b/doc/source/intro-os-ha.rst
@@ -0,0 +1,12 @@
================================================
Introduction to high availability with OpenStack
================================================

.. to do: description of section & improvement of title (intro to OS HA)

.. toctree::
   :maxdepth: 2

   intro-os-ha-state.rst
   intro-os-ha-cluster.rst
   intro-os-ha-memcached.rst

diff --git a/doc/source/monitoring.rst b/doc/source/monitoring.rst
new file mode 100644
index 00000000..a1b13277
--- /dev/null
+++ b/doc/source/monitoring.rst
@@ -0,0 +1,6 @@
==========
Monitoring
==========

diff --git a/doc/source/networking-ha-l3-agent.rst b/doc/source/networking-ha-l3-agent.rst
new file mode 100644
index 00000000..5a6370ae
--- /dev/null
+++ b/doc/source/networking-ha-l3-agent.rst
@@ -0,0 +1,20 @@
========
L3 Agent
========

.. TODO: Introduce L3 agent

HA Routers
~~~~~~~~~~
.. TODO: content for HA routers

Networking DHCP agent
~~~~~~~~~~~~~~~~~~~~~

The OpenStack Networking (neutron) service has a scheduler that lets you run
multiple agents across nodes. The DHCP agent can be natively highly available.

To configure the number of DHCP agents per network, modify the
``dhcp_agents_per_network`` parameter in the :file:`/etc/neutron/neutron.conf`
file. By default, this is set to 1. To achieve high availability, assign more
than one DHCP agent per network. For more information, see
`High-availability for DHCP
`_.

diff --git a/doc/source/networking-ha-neutron-l3-analysis.rst b/doc/source/networking-ha-neutron-l3-analysis.rst
new file mode 100644
index 00000000..7a803132
--- /dev/null
+++ b/doc/source/networking-ha-neutron-l3-analysis.rst
@@ -0,0 +1,6 @@
==========
Neutron L3
==========

.. TODO: create and import Neutron L3 analysis
   Introduce the Networking (neutron) service L3 agent

diff --git a/doc/source/networking-ha-neutron-server.rst b/doc/source/networking-ha-neutron-server.rst
new file mode 100644
index 00000000..646eb66a
--- /dev/null
+++ b/doc/source/networking-ha-neutron-server.rst
@@ -0,0 +1,5 @@
=========================
Neutron Networking server
=========================

.. TODO: Create content similar to other API sections

diff --git a/doc/source/networking-ha.rst b/doc/source/networking-ha.rst
new file mode 100644
index 00000000..fa1ae1e0
--- /dev/null
+++ b/doc/source/networking-ha.rst
@@ -0,0 +1,29 @@
===================================
Configuring the networking services
===================================

Configure networking on each node. See the basic information about
configuring networking in the Networking service section of the
`Install Guides `_,
depending on your distribution.

OpenStack network nodes contain:

- Networking DHCP agent
- Neutron L3 agent
- Networking L2 agent

.. note::

   The L2 agent cannot be distributed and highly available. Instead, it
   must be installed on each data forwarding node to control the virtual
   network driver, such as Open vSwitch or Linux Bridge. One L2 agent runs
   per node and controls its virtual interfaces.

.. toctree::
   :maxdepth: 2

   networking-ha-neutron-server.rst
   networking-ha-neutron-l3-analysis.rst
   networking-ha-l3-agent.rst

diff --git a/doc/source/overview.rst b/doc/source/overview.rst
new file mode 100644
index 00000000..7b64054e
--- /dev/null
+++ b/doc/source/overview.rst
@@ -0,0 +1,24 @@
========
Overview
========

This guide can be split into two parts:

#. High-level architecture
#. Reference architecture examples, monitoring, and testing

.. warning::

   We recommend using this guide for assistance when considering your HA
   cloud. We do not recommend using this guide for manually building your
   HA cloud. We recommend starting with a pre-validated solution and
   adjusting it to your needs.

High availability is not for every user, and it presents some challenges.
It may be too complex for databases or systems with large amounts of data,
and replication can slow large systems down. Different setups have different
prerequisites, so read the guidelines for each setup.

.. important::

   High availability is not enabled by default in OpenStack setups.
diff --git a/doc/source/ref-arch-examples.rst b/doc/source/ref-arch-examples.rst
new file mode 100644
index 00000000..dc842f3f
--- /dev/null
+++ b/doc/source/ref-arch-examples.rst
@@ -0,0 +1,3 @@
======================
Reference Architecture
======================

diff --git a/doc/source/storage-ha-backend.rst b/doc/source/storage-ha-backend.rst
new file mode 100644
index 00000000..8148b528
--- /dev/null
+++ b/doc/source/storage-ha-backend.rst
@@ -0,0 +1,59 @@

.. _storage-ha-backend:

================
Storage back end
================

An OpenStack environment includes multiple data pools for the VMs:

- Ephemeral storage is allocated for an instance and is deleted when the
  instance is deleted. The Compute service manages ephemeral storage and,
  by default, stores ephemeral drives as files on local disks on the
  compute node. As an alternative, you can use Ceph RBD as the storage
  back end for ephemeral storage.

- Persistent storage exists outside all instances. Two types of persistent
  storage are provided:

  - The Block Storage service (cinder), which can use LVM or Ceph RBD as
    the storage back end.
  - The Image service (glance), which can use the Object Storage service
    (swift) or Ceph RBD as the storage back end.

For more information about configuring storage back ends for
the different storage options, see `Manage volumes
`_
in the OpenStack Administrator Guide.

This section discusses ways to protect against data loss in your OpenStack
environment.

RAID drives
-----------

Configuring RAID on the hard drives that implement storage protects your data
against a hard drive failure. If the node itself fails, however, data may be
lost. In particular, all volumes stored on an LVM node can be lost.

Ceph
----

`Ceph RBD `_ is an inherently highly available storage
back end. It creates a storage cluster with multiple nodes that communicate
with each other to replicate and redistribute data dynamically.
A Ceph RBD storage cluster provides a single shared set of storage nodes that
can handle all classes of persistent and ephemeral data (glance, cinder, and
nova) that are required for OpenStack instances.

Ceph RBD provides object replication capabilities by storing Block Storage
volumes as Ceph RBD objects. Ceph RBD ensures that each replica of an object
is stored on a different node. This means that your volumes are protected
against hard drive and node failures, or even the failure of the data center
itself.

When Ceph RBD is used for ephemeral volumes as well as block and image
storage, it supports `live migration
`_
of VMs with ephemeral drives. LVM only supports live migration of
volume-backed VMs.

diff --git a/doc/source/storage-ha-block.rst b/doc/source/storage-ha-block.rst
new file mode 100644
index 00000000..c8d48d5a
--- /dev/null
+++ b/doc/source/storage-ha-block.rst
@@ -0,0 +1,192 @@
==================================
Highly available Block Storage API
==================================

Cinder provides Block-Storage-as-a-Service suitable for performance-sensitive
scenarios such as databases, expandable file systems, or providing a server
with access to raw block-level storage.

Persistent block storage can survive instance termination and can also
be moved across instances like any external storage device. Cinder
also provides a volume snapshot capability for backing up volumes.
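For instance, you can create a volume and take a point-in-time snapshot of
it with the ``openstack`` client (the names and size here are only
illustrative):

.. code-block:: console

   $ openstack volume create --size 10 db-volume
   $ openstack volume snapshot create --volume db-volume db-volume-snap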
Making the Block Storage API service highly available in
active/passive mode involves:

- :ref:`ha-blockstorage-pacemaker`
- :ref:`ha-blockstorage-configure`
- :ref:`ha-blockstorage-services`

In theory, you can run the Block Storage service as active/active.
However, because of significant outstanding concerns, we recommend running
the volume component as active/passive only.

You can read more about these concerns on the
`Red Hat Bugzilla `_,
and there is a
`pseudo roadmap `_
for addressing them upstream.

.. _ha-blockstorage-pacemaker:

Add Block Storage API resource to Pacemaker
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

On RHEL-based systems, create resources for cinder's systemd agents and create
constraints to enforce startup/shutdown ordering:

.. code-block:: console

   pcs resource create openstack-cinder-api systemd:openstack-cinder-api --clone interleave=true
   pcs resource create openstack-cinder-scheduler systemd:openstack-cinder-scheduler --clone interleave=true
   pcs resource create openstack-cinder-volume systemd:openstack-cinder-volume

   pcs constraint order start openstack-cinder-api-clone then openstack-cinder-scheduler-clone
   pcs constraint colocation add openstack-cinder-scheduler-clone with openstack-cinder-api-clone
   pcs constraint order start openstack-cinder-scheduler-clone then openstack-cinder-volume
   pcs constraint colocation add openstack-cinder-volume with openstack-cinder-scheduler-clone

If the Block Storage service runs on the same nodes as the other services,
then it is advisable to also include:

.. code-block:: console

   pcs constraint order start openstack-keystone-clone then openstack-cinder-api-clone

Alternatively, instead of using systemd agents, download and
install the OCF resource agent:

.. code-block:: console

   # cd /usr/lib/ocf/resource.d/openstack
   # wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/cinder-api
   # chmod a+rx *

You can now add the Pacemaker configuration for the Block Storage API
resource. Connect to the Pacemaker cluster with the :command:`crm configure`
command and add the following cluster resources:

.. code-block:: none

   primitive p_cinder-api ocf:openstack:cinder-api \
     params config="/etc/cinder/cinder.conf" \
     os_password="secretsecret" \
     os_username="admin" \
     os_tenant_name="admin" \
     keystone_get_token_url="http://10.0.0.11:5000/v2.0/tokens" \
     op monitor interval="30s" timeout="30s"

This configuration creates ``p_cinder-api``, a resource for managing the
Block Storage API service.

The :command:`crm configure` command supports batch input. Copy and paste
the lines above into your live Pacemaker configuration and then make changes
as required. For example, you may enter ``edit p_ip_cinder-api`` from the
:command:`crm configure` menu and edit the resource to match your preferred
virtual IP address.

Once completed, commit your configuration changes by entering
:command:`commit` from the :command:`crm configure` menu. Pacemaker then
starts the Block Storage API service and its dependent resources on one of
your nodes.

.. _ha-blockstorage-configure:

Configure Block Storage API service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Edit the ``/etc/cinder/cinder.conf`` file. For example, on a RHEL-based
system:
.. code-block:: ini
   :linenos:

   [DEFAULT]
   # This is the name that we advertise ourselves as. For A/P
   # installations, it should be the same everywhere.
   host = cinder-cluster-1

   # Listen on the Block Storage VIP
   osapi_volume_listen = 10.0.0.11

   auth_strategy = keystone
   control_exchange = cinder

   volume_driver = cinder.volume.drivers.nfs.NfsDriver
   nfs_shares_config = /etc/cinder/nfs_exports
   nfs_sparsed_volumes = true
   nfs_mount_options = v3

   [database]
   connection = mysql+pymysql://cinder:CINDER_DBPASS@10.0.0.11/cinder
   max_retries = -1

   [keystone_authtoken]
   # 10.0.0.11 is the Keystone VIP
   identity_uri = http://10.0.0.11:35357/
   www_authenticate_uri = http://10.0.0.11:5000/
   admin_tenant_name = service
   admin_user = cinder
   admin_password = CINDER_PASS

   [oslo_messaging_rabbit]
   # Explicitly list the rabbit hosts as it doesn't play well with HAProxy
   rabbit_hosts = 10.0.0.12,10.0.0.13,10.0.0.14
   # As a consequence, we also need HA queues
   rabbit_ha_queues = True
   heartbeat_timeout_threshold = 60
   heartbeat_rate = 2

Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage
database. Replace ``CINDER_PASS`` with the password you chose for the
``cinder`` user in the Identity service.

This example assumes that you are using NFS for the physical storage, which
will almost never be true in a production installation.

If you are using the Block Storage service OCF agent, some settings will
be filled in for you, resulting in a shorter configuration file:

.. code-block:: ini
   :linenos:

   # We use the MySQL connection to store data. The pymysql driver is
   # Python 3 compatible.
   # Ref: https://wiki.openstack.org/wiki/PyMySQL_evaluation
   connection = mysql+pymysql://cinder:CINDER_DBPASS@10.0.0.11/cinder

   # We bind the Block Storage API to the VIP:
   osapi_volume_listen = 10.0.0.11

   # We send notifications to the highly available RabbitMQ:
   notifier_strategy = rabbit
   rabbit_host = 10.0.0.11

Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage
database.

.. _ha-blockstorage-services:

Configure OpenStack services to use the highly available Block Storage API
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Your OpenStack services must now point their Block Storage API configuration
to the highly available, virtual cluster IP address rather than a Block
Storage API server’s physical IP address as you would for a non-HA
environment.

Create the Block Storage API endpoint with this IP address.

If you are using both private and public IP addresses, create two virtual IPs
and define your endpoint. For example:
.. code-block:: console

   $ openstack endpoint create --region $KEYSTONE_REGION \
     volumev2 public http://PUBLIC_VIP:8776/v2/%\(project_id\)s
   $ openstack endpoint create --region $KEYSTONE_REGION \
     volumev2 admin http://10.0.0.11:8776/v2/%\(project_id\)s
   $ openstack endpoint create --region $KEYSTONE_REGION \
     volumev2 internal http://10.0.0.11:8776/v2/%\(project_id\)s

diff --git a/doc/source/storage-ha-file-systems.rst b/doc/source/storage-ha-file-systems.rst
new file mode 100644
index 00000000..5ef3e2e8
--- /dev/null
+++ b/doc/source/storage-ha-file-systems.rst
@@ -0,0 +1,114 @@
========================================
Highly available Shared File Systems API
========================================

Making the Shared File Systems (manila) API service highly available
in active/passive mode involves:

- :ref:`ha-sharedfilesystems-configure`
- :ref:`ha-sharedfilesystems-services`
- :ref:`ha-sharedfilesystems-pacemaker`

.. _ha-sharedfilesystems-configure:

Configure Shared File Systems API service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Edit the :file:`/etc/manila/manila.conf` file:

.. code-block:: ini
   :linenos:

   # We use the MySQL connection to store data:
   sql_connection = mysql+pymysql://manila:password@10.0.0.11/manila?charset=utf8

   # We bind the Shared File Systems API to the VIP:
   osapi_share_listen = 10.0.0.11

   # We send notifications to the highly available RabbitMQ:
   notifier_strategy = rabbit
   rabbit_host = 10.0.0.11

.. _ha-sharedfilesystems-services:

Configure OpenStack services to use Shared File Systems API
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Your OpenStack services must now point their Shared File Systems API
configuration to the highly available, virtual cluster IP address rather than
a Shared File Systems API server’s physical IP address as you would
for a non-HA environment.

You must create the Shared File Systems API endpoint with this IP address.

If you are using both private and public IP addresses, you should create two
virtual IPs and define your endpoints like this:

.. code-block:: console

   $ openstack endpoint create --region RegionOne \
     sharev2 public 'http://PUBLIC_VIP:8786/v2/%(tenant_id)s'

   $ openstack endpoint create --region RegionOne \
     sharev2 internal 'http://10.0.0.11:8786/v2/%(tenant_id)s'

   $ openstack endpoint create --region RegionOne \
     sharev2 admin 'http://10.0.0.11:8786/v2/%(tenant_id)s'

.. _ha-sharedfilesystems-pacemaker:

Add Shared File Systems API resource to Pacemaker
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

#. Download the resource agent to your system:

   .. code-block:: console

      # cd /usr/lib/ocf/resource.d/openstack
      # wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/manila-api
      # chmod a+rx *

#. Add the Pacemaker configuration for the Shared File Systems
   API resource. Connect to the Pacemaker cluster with the following
   command:

   .. code-block:: console

      # crm configure

   .. note::

      The :command:`crm configure` command supports batch input. Copy and
      paste the lines in the next step into your live Pacemaker
      configuration and then make changes as required.

      For example, you may enter ``edit p_ip_manila-api`` from the
      :command:`crm configure` menu and edit the resource to match your
      preferred virtual IP address.

#. Add the following cluster resources:
   .. code-block:: none

      primitive p_manila-api ocf:openstack:manila-api \
        params config="/etc/manila/manila.conf" \
        os_password="secretsecret" \
        os_username="admin" \
        os_tenant_name="admin" \
        keystone_get_token_url="http://10.0.0.11:5000/v2.0/tokens" \
        op monitor interval="30s" timeout="30s"

   This configuration creates ``p_manila-api``, a resource for managing the
   Shared File Systems API service.

#. Commit your configuration changes by entering the following command
   from the :command:`crm configure` menu:

   .. code-block:: console

      # commit

Pacemaker now starts the Shared File Systems API service and its
dependent resources on one of your nodes.

diff --git a/doc/source/storage-ha-image.rst b/doc/source/storage-ha-image.rst
new file mode 100644
index 00000000..3b1de9c9
--- /dev/null
+++ b/doc/source/storage-ha-image.rst
@@ -0,0 +1,141 @@
==========================
Highly available Image API
==========================

The OpenStack Image service provides discovery, registration, and retrieval
of virtual machine images. To make the OpenStack Image API service
highly available in active/passive mode, you must:

- :ref:`glance-api-pacemaker`
- :ref:`glance-api-configure`
- :ref:`glance-services`

Prerequisites
~~~~~~~~~~~~~

Before beginning, ensure that you are familiar with the
documentation for installing the OpenStack Image API service.
See the *Image service* section in the
`Installation Guides `_,
depending on your distribution.

.. _glance-api-pacemaker:

Add OpenStack Image API resource to Pacemaker
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

#. Download the resource agent to your system:

   .. code-block:: console

      # cd /usr/lib/ocf/resource.d/openstack
      # wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/glance-api
      # chmod a+rx *

#. Add the Pacemaker configuration for the OpenStack Image API resource.
   Use the following command to connect to the Pacemaker cluster:

   .. code-block:: console

      crm configure

   .. note::

      The :command:`crm configure` command supports batch input. Copy and
      paste the lines in the next step into your live Pacemaker
      configuration and then make changes as required.

      For example, you may enter ``edit p_ip_glance-api`` from the
      :command:`crm configure` menu and edit the resource to match your
      preferred virtual IP address.

#. Add the following cluster resources:

   .. code-block:: none

      primitive p_glance-api ocf:openstack:glance-api \
        params config="/etc/glance/glance-api.conf" \
        os_password="secretsecret" \
        os_username="admin" os_tenant_name="admin" \
        os_auth_url="http://10.0.0.11:5000/v2.0/" \
        op monitor interval="30s" timeout="30s"

   This configuration creates ``p_glance-api``, a resource for managing the
   OpenStack Image API service.

#. Commit your configuration changes by entering the following command from
   the :command:`crm configure` menu:

   .. code-block:: console

      commit

Pacemaker then starts the OpenStack Image API service and its dependent
resources on one of your nodes.

.. _glance-api-configure:

Configure OpenStack Image service API
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Edit the :file:`/etc/glance/glance-api.conf` file
to configure the OpenStack Image service:
.. code-block:: ini

   # We have to use the MySQL connection to store data:
   sql_connection=mysql://glance:password@10.0.0.11/glance
   # Alternatively, you can switch to pymysql,
   # a new Python 3 compatible library, and use
   # sql_connection=mysql+pymysql://glance:password@10.0.0.11/glance
   # to be ready when everything moves to Python 3.
   # Ref: https://wiki.openstack.org/wiki/PyMySQL_evaluation

   # We bind the OpenStack Image API to the VIP:
   bind_host = 10.0.0.11

   # Connect to the OpenStack Image registry service:
   registry_host = 10.0.0.11

   # We send notifications to the highly available RabbitMQ:
   notifier_strategy = rabbit
   rabbit_host = 10.0.0.11

[TODO: need more discussion of these parameters]

.. _glance-services:

Configure OpenStack services to use the highly available OpenStack Image API
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Your OpenStack services must now point their OpenStack Image API
configuration to the highly available, virtual cluster IP address instead of
pointing to the physical IP address of an OpenStack Image API server as you
would in a non-HA cluster.

For example, if your OpenStack Image API service IP address is 10.0.0.11
(as in the configuration explained here), you would use the following
configuration in your :file:`nova.conf` file:

.. code-block:: ini

   [glance]
   # ...
   api_servers = 10.0.0.11
   # ...

You must also create the OpenStack Image API endpoint with this IP address.
If you are using both private and public IP addresses, create two virtual IP
addresses and define your endpoint. For example:

.. code-block:: console

   $ openstack endpoint create --region $KEYSTONE_REGION \
     image public http://PUBLIC_VIP:9292

   $ openstack endpoint create --region $KEYSTONE_REGION \
     image admin http://10.0.0.11:9292

   $ openstack endpoint create --region $KEYSTONE_REGION \
     image internal http://10.0.0.11:9292

diff --git a/doc/source/storage-ha.rst b/doc/source/storage-ha.rst
new file mode 100644
index 00000000..22ea30c4
--- /dev/null
+++ b/doc/source/storage-ha.rst
@@ -0,0 +1,22 @@
===================
Configuring storage
===================

.. toctree::
   :maxdepth: 2

   storage-ha-image.rst
   storage-ha-block.rst
   storage-ha-file-systems.rst
   storage-ha-backend.rst

Making the Block Storage (cinder) API service highly available in
active/passive mode involves:

* Configuring Block Storage to listen on the VIP address

* Managing the Block Storage API daemon with the Pacemaker cluster manager

* Configuring OpenStack services to use this IP address
.. To Do: HA without Pacemaker

diff --git a/doc/source/testing.rst b/doc/source/testing.rst
new file mode 100644
index 00000000..3cb81103
--- /dev/null
+++ b/doc/source/testing.rst
@@ -0,0 +1,6 @@
=======
Testing
=======

diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 00000000..d8b45864
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,27 @@
[metadata]
name = openstackhaguide
summary = OpenStack High Availability Guide
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = https://docs.openstack.org/
classifier =
    Environment :: OpenStack
    Intended Audience :: Information Technology
    Intended Audience :: System Administrators
    License :: OSI Approved :: Apache Software License
    Operating System :: POSIX :: Linux
    Topic :: Documentation

[global]
setup-hooks =
    pbr.hooks.setup_hook

[files]

[build_sphinx]
warning-is-error = 1
build-dir = build
source-dir = source

[wheel]
universal = 1

diff --git a/setup.py b/setup.py
new file mode 100644
index 00000000..73637574
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,30 @@
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

setuptools.setup(
    setup_requires=['pbr'],
    pbr=True)

diff --git a/tox.ini b/tox.ini
index 721ad3f4..01a49e10 100644
--- a/tox.ini
+++ b/tox.ini
@@ -14,3 +14,16 @@ deps =
 commands =
     doc8 doc/source -e txt -e rst
     sphinx-build -E -W -b html doc/source doc/build/html
+
+[doc8]
+# Settings for doc8:
+# Ignore target directories and autogenerated files
+ignore-path = doc/*/target,doc/*/build*
+# File extensions to use
+extensions = .rst,.txt
+# Maximal line length should be 79 but we have some overlong lines.
+# Let's not get far more in.
+max-line-length = 79
+# Disable some doc8 checks:
+# D000: Check RST validity (cannot handle the "linenos" directive)
+ignore = D000
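With these settings in place, you can run the same checks locally that the
``commands`` entry above runs under tox (assuming ``doc8`` and Sphinx are
installed):

.. code-block:: console

   $ doc8 doc/source -e txt -e rst
   $ sphinx-build -E -W -b html doc/source doc/build/html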