diff --git a/doc/source/_static/manila.conf.sample b/doc/source/_static/manila.conf.sample new file mode 100644 index 0000000000..b67e70a7df --- /dev/null +++ b/doc/source/_static/manila.conf.sample @@ -0,0 +1,3216 @@ +[DEFAULT] + +# +# From manila +# + +# The maximum number of items returned in a single response from a +# collection resource. (integer value) +#osapi_max_limit = 1000 + +# Base URL to be presented to users in links to the Share API (string +# value) +#osapi_share_base_URL = + +# Treat X-Forwarded-For as the canonical remote address. Only enable +# this if you have a sanitizing proxy. (boolean value) +#use_forwarded_for = false + +# Top-level directory for maintaining manila's state. (string value) +#state_path = /var/lib/manila + +# Region name of this node. (string value) +#os_region_name = + +# Warning: Failed to format sample for my_ip +# unhashable type: 'HostAddress' + +# The topic scheduler nodes listen on. (string value) +#scheduler_topic = manila-scheduler + +# The topic share nodes listen on. (string value) +#share_topic = manila-share + +# The topic data nodes listen on. (string value) +#data_topic = manila-data + +# Whether to rate limit the API. (boolean value) +#api_rate_limit = true + +# Specify list of extensions to load when using osapi_share_extension +# option with manila.api.contrib.select_extensions. (list value) +#osapi_share_ext_list = + +# The osapi share extensions to load. (list value) +#osapi_share_extension = manila.api.contrib.standard_extensions + +# The filename to use with sqlite. (string value) +#sqlite_db = manila.sqlite + +# If passed, use synchronous mode for sqlite. (boolean value) +#sqlite_synchronous = true + +# Timeout before idle SQL connections are reaped. (integer value) +#sql_idle_timeout = 3600 + +# Maximum database connection retries during startup. (setting -1 +# implies an infinite retry count). (integer value) +#sql_max_retries = 10 + +# Interval between retries of opening a SQL connection. 
(integer +# value) +#sql_retry_interval = 10 + +# Full class name for the scheduler manager. (string value) +#scheduler_manager = manila.scheduler.manager.SchedulerManager + +# Full class name for the share manager. (string value) +#share_manager = manila.share.manager.ShareManager + +# Full class name for the data manager. (string value) +#data_manager = manila.data.manager.DataManager + +# Warning: Failed to format sample for host +# unhashable type: 'HostAddress' + +# Availability zone of this node. (string value) +#storage_availability_zone = nova + +# Default share type to use. (string value) +#default_share_type = + +# Default share group type to use. (string value) +#default_share_group_type = + +# Memcached servers or None for in process cache. (list value) +#memcached_servers = + +# Time period to generate share usages for. Time period must be hour, +# day, month or year. (string value) +#share_usage_audit_period = month + +# Deprecated: command to use for running commands as root. (string +# value) +#root_helper = sudo + +# Path to the rootwrap configuration file to use for running commands +# as root. (string value) +#rootwrap_config = + +# Whether to log monkey patching. (boolean value) +#monkey_patch = false + +# List of modules or decorators to monkey patch. (list value) +#monkey_patch_modules = + +# Maximum time since last check-in for up service. (integer value) +#service_down_time = 60 + +# The full class name of the share API class to use. (string value) +#share_api_class = manila.share.api.API + +# The strategy to use for auth. Supports noauth, keystone, and +# deprecated. (string value) +#auth_strategy = keystone + +# A list of share backend names to use. These backend names should be +# backed by a unique [CONFIG] group with its options. (list value) +#enabled_share_backends = + +# Specify list of protocols to be allowed for share creation. 
+# Available values are '('NFS', 'CIFS', 'GLUSTERFS', 'HDFS', 'CEPHFS', +# 'MAPRFS')' (list value) +#enabled_share_protocols = NFS,CIFS + +# The full class name of the Compute API class to use. (string value) +#compute_api_class = manila.compute.nova.API + +# The back end URL to use for distributed coordination. (string value) +#backend_url = file://$state_path + +# DEPRECATED: Number of seconds between heartbeats for distributed +# coordination. No longer used since distributed coordination manages +# its heartbeat internally. (floating point value) +# This option is deprecated for removal since 5.0.0. +# Its value may be silently ignored in the future. +# Reason: This option is no longer used. +#heartbeat = 1.0 + +# DEPRECATED: Initial number of seconds to wait after failed +# reconnection. No longer used since distributed coordination manages +# its heartbeat internally. (floating point value) +# This option is deprecated for removal since 5.0.0. +# Its value may be silently ignored in the future. +# Reason: This option is no longer used. +#initial_reconnect_backoff = 0.1 + +# DEPRECATED: Maximum number of seconds between sequential +# reconnection retries. No longer used since distributed coordination +# manages its heartbeat internally. (floating point value) +# This option is deprecated for removal since 5.0.0. +# Its value may be silently ignored in the future. +# Reason: This option is no longer used. +#max_reconnect_backoff = 60.0 + +# The backend to use for database. (string value) +#db_backend = sqlalchemy + +# Services to be added to the available pool on create. (boolean +# value) +#enable_new_services = true + +# Template string to be used to generate share names. (string value) +#share_name_template = share-%s + +# Template string to be used to generate share snapshot names. (string +# value) +#share_snapshot_name_template = share-snapshot-%s + +# Driver to use for database access. 
(string value) +#db_driver = manila.db + +# Whether to make exception message format errors fatal. (boolean +# value) +#fatal_exception_format_errors = false + +# Message minimum life in seconds. (integer value) +#message_ttl = 2592000 + +# Interval between periodic task runs to clean expired messages in +# seconds. (integer value) +#message_reap_interval = 86400 + +# Name of Open vSwitch bridge to use. (string value) +#ovs_integration_bridge = br-int + +# The full class name of the Networking API class to use. (string +# value) +#network_api_class = manila.network.neutron.neutron_network_plugin.NeutronNetworkPlugin + +# vNIC type used for binding. (string value) +# Allowed values: baremetal, normal, direct, direct-physical, macvtap +#neutron_vnic_type = baremetal + +# Host ID to be used when creating neutron port. If not set host is +# set to manila-share host by default. (string value) +#neutron_host_id = tbarron + +# Default Neutron network that will be used for share server creation. +# This opt is used only with class 'NeutronSingleNetworkPlugin'. +# (string value) +#neutron_net_id = + +# Default Neutron subnet that will be used for share server creation. +# Should be assigned to network defined in opt 'neutron_net_id'. This +# opt is used only with class 'NeutronSingleNetworkPlugin'. (string +# value) +#neutron_subnet_id = + +# Gateway address that should be used. Required. (string value) +#standalone_network_plugin_gateway = + +# Network mask that will be used. Can be either decimal like '24' or +# binary like '255.255.255.0'. Required. (string value) +#standalone_network_plugin_mask = + +# Network type, such as 'flat', 'vlan', 'vxlan' or 'gre'. Empty value +# is alias for 'flat'. It will be assigned to share-network and share +# drivers will be able to use this for network interfaces within +# provisioned share servers. Optional. 
(string value) +# Allowed values: flat, vlan, vxlan, gre +#standalone_network_plugin_network_type = + +# Set it if network has segmentation (VLAN, VXLAN, etc...). It will be +# assigned to share-network and share drivers will be able to use this +# for network interfaces within provisioned share servers. Optional. +# Example: 1001 (integer value) +#standalone_network_plugin_segmentation_id = + +# Can be IP address, range of IP addresses or list of addresses or +# ranges. Contains addresses from IP network that are allowed to be +# used. If empty, then will be assumed that all host addresses from +# network can be used. Optional. Examples: 10.0.0.10 or +# 10.0.0.10-10.0.0.20 or +# 10.0.0.10-10.0.0.20,10.0.0.30-10.0.0.40,10.0.0.50 (list value) +#standalone_network_plugin_allowed_ip_ranges = + +# DEPRECATED: IP version of network. Optional.Allowed values are '4' +# and '6'. Default value is '4'. Note: This option is no longer used +# and has no effect (integer value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: This option has been replaced by +# 'network_plugin_ipv4_enabled' and 'network_plugin_ipv6_enabled' +# options. +#standalone_network_plugin_ip_version = 4 + +# Maximum Transmission Unit (MTU) value of the network. Default value +# is 1500. (integer value) +#standalone_network_plugin_mtu = 1500 + +# Number of shares allowed per project. (integer value) +#quota_shares = 50 + +# Number of share snapshots allowed per project. (integer value) +#quota_snapshots = 50 + +# Number of share gigabytes allowed per project. (integer value) +#quota_gigabytes = 1000 + +# Number of snapshot gigabytes allowed per project. (integer value) +#quota_snapshot_gigabytes = 1000 + +# Number of share-networks allowed per project. (integer value) +#quota_share_networks = 10 + +# Number of share groups allowed. (integer value) +#quota_share_groups = 50 + +# Number of share group snapshots allowed. 
(integer value) +#quota_share_group_snapshots = 50 + +# Number of seconds until a reservation expires. (integer value) +#reservation_expire = 86400 + +# Count of reservations until usage is refreshed. (integer value) +#until_refresh = 0 + +# Number of seconds between subsequent usage refreshes. (integer +# value) +#max_age = 0 + +# Default driver to use for quota checks. (string value) +#quota_driver = manila.quota.DbQuotaDriver + +# The scheduler host manager class to use. (string value) +#scheduler_host_manager = manila.scheduler.host_manager.HostManager + +# Maximum number of attempts to schedule a share. (integer value) +#scheduler_max_attempts = 3 + +# Which filter class names to use for filtering hosts when not +# specified in the request. (list value) +#scheduler_default_filters = AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter,DriverFilter,ShareReplicationFilter + +# Which weigher class names to use for weighing hosts. (list value) +#scheduler_default_weighers = CapacityWeigher,GoodnessWeigher + +# Which filter class names to use for filtering hosts creating share +# group when not specified in the request. (list value) +#scheduler_default_share_group_filters = AvailabilityZoneFilter,ConsistentSnapshotFilter + +# Default scheduler driver to use. (string value) +#scheduler_driver = manila.scheduler.drivers.filter.FilterScheduler + +# Absolute path to scheduler configuration JSON file. (string value) +#scheduler_json_config_location = + +# Maximum number of volume gigabytes to allow per host. (integer +# value) +#max_gigabytes = 10000 + +# Multiplier used for weighing share capacity. Negative numbers mean +# to stack vs spread. (floating point value) +#capacity_weight_multiplier = 1.0 + +# Multiplier used for weighing pools which have existing share +# servers. Negative numbers mean to spread vs stack. (floating point +# value) +#pool_weight_multiplier = 1.0 + +# Seconds between nodes reporting state to datastore. 
(integer value) +#report_interval = 10 + +# Seconds between running periodic tasks. (integer value) +#periodic_interval = 60 + +# Range of seconds to randomly delay when starting the periodic task +# scheduler to reduce stampeding. (Disable by setting to 0) (integer +# value) +#periodic_fuzzy_delay = 60 + +# Warning: Failed to format sample for osapi_share_listen +# unhashable type: 'HostAddress' + +# Port for OpenStack Share API to listen on. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#osapi_share_listen_port = 8786 + +# Number of workers for OpenStack Share API service. (integer value) +#osapi_share_workers = 1 + +# If set to False, then share creation from snapshot will be performed +# on the same host. If set to True, then scheduling step will be used. +# (boolean value) +#use_scheduler_creating_share_from_snapshot = false + +# Directory where Ganesha config files are stored. (string value) +#ganesha_config_dir = /etc/ganesha + +# Path to main Ganesha config file. (string value) +#ganesha_config_path = $ganesha_config_dir/ganesha.conf + +# DEPRECATED: Options to use when exporting a share using ganesha NFS +# server. Note that these defaults can be overridden when a share is +# created by passing metadata with key name export_options. Also note +# the complete set of default ganesha export options is specified in +# ganesha_utils. (GPFS only.) (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: This option is no longer used. +#ganesha_nfs_export_options = maxread = 65536, prefread = 65536 + +# Name of the ganesha nfs service. (string value) +#ganesha_service_name = ganesha.nfsd + +# Location of Ganesha database file. (Ganesha module only.) (string +# value) +#ganesha_db_path = $state_path/manila-ganesha.db + +# Path to directory containing Ganesha export configuration. (Ganesha +# module only.) 
(string value) +#ganesha_export_dir = $ganesha_config_dir/export.d + +# Path to directory containing Ganesha export block templates. +# (Ganesha module only.) (string value) +#ganesha_export_template_dir = /etc/manila/ganesha-export-templ.d + +# Number of times to attempt to run flakey shell commands. (integer +# value) +#num_shell_tries = 3 + +# The percentage of backend capacity reserved. (integer value) +#reserved_share_percentage = 0 + +# The backend name for a given driver implementation. (string value) +#share_backend_name = + +# Name of the configuration group in the Manila conf file to look for +# network config options.If not set, the share backend's config group +# will be used.If an option is not found within provided group, +# then'DEFAULT' group will be used for search of option. (string +# value) +#network_config_group = + +# There are two possible approaches for share drivers in Manila. First +# is when share driver is able to handle share-servers and second when +# not. Drivers can support either both or only one of these +# approaches. So, set this opt to True if share driver is able to +# handle share servers and it is desired mode else set False. It is +# set to None by default to make this choice intentional. (boolean +# value) +#driver_handles_share_servers = + +# Float representation of the over subscription ratio when thin +# provisioning is involved. Default ratio is 20.0, meaning provisioned +# capacity can be 20 times the total physical capacity. If the ratio +# is 10.5, it means provisioned capacity can be 10.5 times the total +# physical capacity. A ratio of 1.0 means provisioned capacity cannot +# exceed the total physical capacity. A ratio lower than 1.0 is +# invalid. (floating point value) +#max_over_subscription_ratio = 20.0 + +# List of files and folders to be ignored when migrating shares. Items +# should be names (not including any path). 
(list value) +#migration_ignore_files = lost+found + +# The template for mounting shares for this backend. Must specify the +# executable with all necessary parameters for the protocol supported. +# 'proto' template element may not be required if included in the +# command. 'export' and 'path' template elements are required. It is +# advisable to separate different commands per backend. (string value) +#share_mount_template = mount -vt %(proto)s %(options)s %(export)s %(path)s + +# The template for unmounting shares for this backend. Must specify +# the executable with all necessary parameters for the protocol +# supported. 'path' template element is required. It is advisable to +# separate different commands per backend. (string value) +#share_unmount_template = umount -v %(path)s + +# Protocol access mapping for this backend. Should be a dictionary +# comprised of {'access_type1': ['share_proto1', 'share_proto2'], +# 'access_type2': ['share_proto2', 'share_proto3']}. (dict value) +#protocol_access_mapping = ip:['nfs'],user:['cifs'] + +# DEPRECATED: Specify whether read only access rule mode is supported +# in this backend. Obsolete. (boolean value) +# Deprecated group/name - [DEFAULT]/migration_readonly_support +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: All drivers are now required to support read-only access +# rules. +#migration_readonly_rules_support = true + +# If share driver requires to setup admin network for share, then +# define network plugin config options in some separate config group +# and set its name here. Used only with another option +# 'driver_handles_share_servers' set to 'True'. (string value) +#admin_network_config_group = + +# A string specifying the replication domain that the backend belongs +# to. This option needs to be specified the same in the configuration +# sections of all backends that support replication between each +# other. 
If this option is not specified in the group, it means that +# replication is not enabled on the backend. (string value) +#replication_domain = + +# String representation for an equation that will be used to filter +# hosts. (string value) +#filter_function = + +# String representation for an equation that will be used to determine +# the goodness of a host. (string value) +#goodness_function = + +# Backend server SSH connection timeout. (integer value) +#ssh_conn_timeout = 60 + +# Minimum number of connections in the SSH pool. (integer value) +#ssh_min_pool_conn = 1 + +# Maximum number of connections in the SSH pool. (integer value) +#ssh_max_pool_conn = 10 + +# The full class name of the Private Data Driver class to use. (string +# value) +#drivers_private_storage_class = manila.share.drivers_private_data.SqlStorageDriver + +# Fully qualified path to the ceph.conf file. (string value) +#cephfs_conf_path = + +# The name of the cluster in use, if it is not the default ('ceph'). +# (string value) +#cephfs_cluster_name = + +# The name of the ceph auth identity to use. (string value) +#cephfs_auth_id = manila + +# Whether to enable snapshots in this driver. (boolean value) +#cephfs_enable_snapshots = false + +# The type of protocol helper to use. Default is CEPHFS. (string +# value) +# Allowed values: CEPHFS, NFS +#cephfs_protocol_helper_type = CEPHFS + +# Whether the NFS-Ganesha server is remote to the driver. (boolean +# value) +#cephfs_ganesha_server_is_remote = false + +# The IP address of the NFS-Ganesha server. (string value) +#cephfs_ganesha_server_ip = + +# The username to authenticate as in the remote NFS-Ganesha server +# host. (string value) +#cephfs_ganesha_server_username = root + +# The path of the driver host's private SSH key file. (string value) +#cephfs_ganesha_path_to_private_key = + +# The password to authenticate as the user in the remote Ganesha +# server host. This is not required if +# 'cephfs_ganesha_path_to_private_key' is configured. 
(string value) +#cephfs_ganesha_server_password = + +# Linux bridge used by container hypervisor to plug host-side veth to. +# It will be unplugged from here by the driver. (string value) +#container_linux_bridge_name = docker0 + +# OVS bridge to use to plug a container to. (string value) +#container_ovs_bridge_name = br-int + +# Determines whether to allow guest access to CIFS share or not. +# (boolean value) +#container_cifs_guest_ok = true + +# Image to be used for a container-based share server. (string value) +#container_image_name = manila-docker-container + +# Container helper which provides container-related operations to the +# driver. (string value) +#container_helper = manila.share.drivers.container.container_helper.DockerExecHelper + +# Helper which facilitates interaction with share server. (string +# value) +#container_protocol_helper = manila.share.drivers.container.protocol_helper.DockerCIFSHelper + +# Helper which facilitates interaction with storage solution used to +# actually store data. By default LVM is used to provide storage for a +# share. (string value) +#container_storage_helper = manila.share.drivers.container.storage_helper.LVMHelper + +# LVM volume group to use for volumes. This volume group must be +# created by the cloud administrator independently from manila +# operations. (string value) +#container_volume_group = manila_docker_volumes + +# User name for the EMC server. (string value) +#emc_nas_login = + +# Password for the EMC server. (string value) +#emc_nas_password = + +# Warning: Failed to format sample for emc_nas_server +# unhashable type: 'HostAddress' + +# Port number for the EMC server. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#emc_nas_server_port = 8080 + +# Use secure connection to server. (boolean value) +#emc_nas_server_secure = true + +# Share backend. (string value) +# Allowed values: isilon, vnx, unity, vmax +#emc_share_backend = + +# The root directory where shares will be located. 
(string value) +#emc_nas_root_dir = + +# If set to False the https client will not validate the SSL +# certificate of the backend endpoint. (boolean value) +#emc_ssl_cert_verify = true + +# Can be used to specify a non default path to a CA_BUNDLE file or +# directory with certificates of trusted CAs, which will be used to +# validate the backend. (string value) +#emc_ssl_cert_path = + +# Data mover to host the NAS server. (string value) +#vmax_server_container = + +# Comma separated list of pools that can be used to persist share +# data. (list value) +#vmax_share_data_pools = + +# Comma separated list of ports that can be used for share server +# interfaces. Members of the list can be Unix-style glob expressions. +# (list value) +#vmax_ethernet_ports = + +# Path to smb config. (string value) +#smb_template_config_path = $state_path/smb.conf + +# Volume name template. (string value) +#volume_name_template = manila-share-%s + +# Volume snapshot name template. (string value) +#volume_snapshot_name_template = manila-snapshot-%s + +# Parent path in service instance where shares will be mounted. +# (string value) +#share_mount_path = /shares + +# Maximum time to wait for creating cinder volume. (integer value) +#max_time_to_create_volume = 180 + +# Maximum time to wait for extending cinder volume. (integer value) +#max_time_to_extend_volume = 180 + +# Maximum time to wait for attaching cinder volume. (integer value) +#max_time_to_attach = 120 + +# Path to SMB config in service instance. (string value) +#service_instance_smb_config_path = $share_mount_path/smb.conf + +# Specify list of share export helpers. (list value) +#share_helpers = CIFS=manila.share.drivers.helpers.CIFSHelperIPAccess,NFS=manila.share.drivers.helpers.NFSHelper + +# Filesystem type of the share volume. (string value) +# Allowed values: ext4, ext3 +#share_volume_fstype = ext4 + +# Name or id of cinder volume type which will be used for all volumes +# created by driver. 
(string value) +#cinder_volume_type = + +# Remote GlusterFS server node's login password. This is not required +# if 'glusterfs_path_to_private_key' is configured. (string value) +# Deprecated group/name - [DEFAULT]/glusterfs_native_server_password +#glusterfs_server_password = + +# Path of Manila host's private SSH key file. (string value) +# Deprecated group/name - [DEFAULT]/glusterfs_native_path_to_private_key +#glusterfs_path_to_private_key = + +# Type of NFS server that mediate access to the Gluster volumes +# (Gluster or Ganesha). (string value) +#glusterfs_nfs_server_type = Gluster + +# Warning: Failed to format sample for glusterfs_ganesha_server_ip +# unhashable type: 'HostAddress' + +# Remote Ganesha server node's username. (string value) +#glusterfs_ganesha_server_username = root + +# Remote Ganesha server node's login password. This is not required if +# 'glusterfs_path_to_private_key' is configured. (string value) +#glusterfs_ganesha_server_password = + +# Specifies GlusterFS share layout, that is, the method of associating +# backing GlusterFS resources to shares. (string value) +#glusterfs_share_layout = + +# Specifies the GlusterFS volume to be mounted on the Manila host. It +# is of the form [remoteuser@]:. (string value) +#glusterfs_target = + +# Base directory containing mount points for Gluster volumes. (string +# value) +#glusterfs_mount_point_base = $state_path/mnt + +# List of GlusterFS servers that can be used to create shares. Each +# GlusterFS server should be of the form [remoteuser@], and +# they are assumed to belong to distinct Gluster clusters. (list +# value) +# Deprecated group/name - [DEFAULT]/glusterfs_targets +#glusterfs_servers = + +# Regular expression template used to filter GlusterFS volumes for +# share creation. The regex template can optionally (ie. 
with support +# of the GlusterFS backend) contain the #{size} parameter which +# matches an integer (sequence of digits) in which case the value +# shall be interpreted as size of the volume in GB. Examples: "manila- +# share-volume-\d+$", "manila-share-volume-#{size}G-\d+$"; with +# matching volume names, respectively: "manila-share-volume-12", +# "manila-share-volume-3G-13". In latter example, the number that +# matches "#{size}", that is, 3, is an indication that the size of +# volume is 3G. (string value) +#glusterfs_volume_pattern = + +# Warning: Failed to format sample for hdfs_namenode_ip +# unhashable type: 'HostAddress' + +# The port of HDFS namenode service. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#hdfs_namenode_port = 9000 + +# HDFS namenode SSH port. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#hdfs_ssh_port = 22 + +# HDFS namenode ssh login name. (string value) +#hdfs_ssh_name = + +# HDFS namenode SSH login password, This parameter is not necessary, +# if 'hdfs_ssh_private_key' is configured. (string value) +#hdfs_ssh_pw = + +# Path to HDFS namenode SSH private key for login. (string value) +#hdfs_ssh_private_key = + +# Warning: Failed to format sample for hitachi_hnas_ip +# unhashable type: 'HostAddress' + +# HNAS username Base64 String in order to perform tasks such as create +# file-systems and network interfaces. (string value) +# Deprecated group/name - [DEFAULT]/hds_hnas_user +#hitachi_hnas_user = + +# HNAS user password. Required only if private key is not provided. +# (string value) +# Deprecated group/name - [DEFAULT]/hds_hnas_password +#hitachi_hnas_password = + +# Specify which EVS this backend is assigned to. 
(integer value) +# Deprecated group/name - [DEFAULT]/hds_hnas_evs_id +#hitachi_hnas_evs_id = + +# Warning: Failed to format sample for hitachi_hnas_evs_ip +# unhashable type: 'HostAddress' + +# Warning: Failed to format sample for hitachi_hnas_admin_network_ip +# unhashable type: 'HostAddress' + +# Specify file-system name for creating shares. (string value) +# Deprecated group/name - [DEFAULT]/hds_hnas_file_system_name +#hitachi_hnas_file_system_name = + +# RSA/DSA private key value used to connect into HNAS. Required only +# if password is not provided. (string value) +# Deprecated group/name - [DEFAULT]/hds_hnas_ssh_private_key +#hitachi_hnas_ssh_private_key = + +# Warning: Failed to format sample for hitachi_hnas_cluster_admin_ip0 +# unhashable type: 'HostAddress' + +# The time (in seconds) to wait for stalled HNAS jobs before aborting. +# (integer value) +# Deprecated group/name - [DEFAULT]/hds_hnas_stalled_job_timeout +#hitachi_hnas_stalled_job_timeout = 30 + +# Python class to be used for driver helper. (string value) +# Deprecated group/name - [DEFAULT]/hds_hnas_driver_helper +#hitachi_hnas_driver_helper = manila.share.drivers.hitachi.hnas.ssh.HNASSSHBackend + +# By default, CIFS snapshots are not allowed to be taken when the +# share has clients connected because consistent point-in-time replica +# cannot be guaranteed for all files. Enabling this might cause +# inconsistent snapshots on CIFS shares. (boolean value) +# Deprecated group/name - [DEFAULT]/hds_hnas_allow_cifs_snapshot_while_mounted +#hitachi_hnas_allow_cifs_snapshot_while_mounted = false + +# Warning: Failed to format sample for hitachi_hsp_host +# unhashable type: 'HostAddress' + +# HSP username to perform tasks such as create filesystems and shares. +# (string value) +#hitachi_hsp_username = + +# HSP password for the username provided. 
(string value) +#hitachi_hsp_password = + +# 3PAR WSAPI Server Url like https://<3par ip>:8080/api/v1 (string +# value) +# Deprecated group/name - [DEFAULT]/hp3par_api_url +#hpe3par_api_url = + +# 3PAR username with the 'edit' role (string value) +# Deprecated group/name - [DEFAULT]/hp3par_username +#hpe3par_username = + +# 3PAR password for the user specified in hpe3par_username (string +# value) +# Deprecated group/name - [DEFAULT]/hp3par_password +#hpe3par_password = + +# Warning: Failed to format sample for hpe3par_san_ip +# unhashable type: 'HostAddress' + +# Username for SAN controller (string value) +# Deprecated group/name - [DEFAULT]/hp3par_san_login +#hpe3par_san_login = + +# Password for SAN controller (string value) +# Deprecated group/name - [DEFAULT]/hp3par_san_password +#hpe3par_san_password = + +# SSH port to use with SAN (port value) +# Minimum value: 0 +# Maximum value: 65535 +# Deprecated group/name - [DEFAULT]/hp3par_san_ssh_port +#hpe3par_san_ssh_port = 22 + +# The File Provisioning Group (FPG) to use (FPG) +# Deprecated group/name - [DEFAULT]/hp3par_fpg +#hpe3par_fpg = + +# Use one filestore per share (boolean value) +# Deprecated group/name - [DEFAULT]/hp3par_fstore_per_share +#hpe3par_fstore_per_share = false + +# Require IP access rules for CIFS (in addition to user) (boolean +# value) +#hpe3par_require_cifs_ip = false + +# Enable HTTP debugging to 3PAR (boolean value) +# Deprecated group/name - [DEFAULT]/hp3par_debug +#hpe3par_debug = false + +# File system admin user name for CIFS. (string value) +# Deprecated group/name - [DEFAULT]/hp3par_cifs_admin_access_username +#hpe3par_cifs_admin_access_username = + +# File system admin password for CIFS. (string value) +# Deprecated group/name - [DEFAULT]/hp3par_cifs_admin_access_password +#hpe3par_cifs_admin_access_password = + +# File system domain for the CIFS admin user. 
(string value) +# Deprecated group/name - [DEFAULT]/hp3par_cifs_admin_access_domain +#hpe3par_cifs_admin_access_domain = LOCAL_CLUSTER + +# The path where shares will be mounted when deleting nested file +# trees. (string value) +# Deprecated group/name - [DEFAULT]/hpe3par_share_mount_path +#hpe3par_share_mount_path = /mnt/ + +# The configuration file for the Manila Huawei driver. (string value) +#manila_huawei_conf_file = /etc/manila/manila_huawei_conf.xml + +# Warning: Failed to format sample for gpfs_share_export_ip +# unhashable type: 'HostAddress' + +# Base folder where exported shares are located. (string value) +#gpfs_mount_point_base = $state_path/mnt + +# NFS Server type. Valid choices are "CES" (Ganesha NFS) or "KNFS" +# (Kernel NFS). (string value) +#gpfs_nfs_server_type = CES + +# A list of the fully qualified NFS server names that make up the +# OpenStack Manila configuration. (list value) +#gpfs_nfs_server_list = + +# True:when Manila services are running on one of the Spectrum Scale +# node. False:when Manila services are not running on any of the +# Spectrum Scale node. (boolean value) +#is_gpfs_node = false + +# GPFS server SSH port. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#gpfs_ssh_port = 22 + +# GPFS server SSH login name. (string value) +#gpfs_ssh_login = + +# GPFS server SSH login password. The password is not needed, if +# 'gpfs_ssh_private_key' is configured. (string value) +#gpfs_ssh_password = + +# Path to GPFS server SSH private key for login. (string value) +#gpfs_ssh_private_key = + +# Specify list of share export helpers. (list value) +#gpfs_share_helpers = KNFS=manila.share.drivers.ibm.gpfs.KNFSHelper,CES=manila.share.drivers.ibm.gpfs.CESHelper + +# DEPRECATED: Options to use when exporting a share using kernel NFS +# server. Note that these defaults can be overridden when a share is +# created by passing metadata with key name export_options. (string +# value) +# This option is deprecated for removal. 
+# Its value may be silently ignored in the future. +# Reason: This option isn't used any longer. Please use share-type +# extra specs for export options. +#knfs_export_options = rw,sync,no_root_squash,insecure,no_wdelay,no_subtree_check + +# The list of IPs or hostnames of nodes where mapr-core is installed. +# (list value) +#maprfs_clinode_ip = + +# CLDB node SSH port. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#maprfs_ssh_port = 22 + +# Cluster admin user ssh login name. (string value) +#maprfs_ssh_name = mapr + +# Cluster node SSH login password, This parameter is not necessary, if +# 'maprfs_ssh_private_key' is configured. (string value) +#maprfs_ssh_pw = + +# Path to SSH private key for login. (string value) +#maprfs_ssh_private_key = + +# Path in MapRFS where share volumes must be created. (string value) +#maprfs_base_volume_dir = / + +# The list of IPs or hostnames of ZooKeeper nodes. (list value) +#maprfs_zookeeper_ip = + +# The list of IPs or hostnames of CLDB nodes. (list value) +#maprfs_cldb_ip = + +# Specify whether existing volume should be renamed when start +# managing. (boolean value) +#maprfs_rename_managed_volume = true + +# Base folder where exported shares are located. (string value) +#lvm_share_export_root = $state_path/mnt + +# DEPRECATED: IP to be added to export string. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Use lvm_share_export_ips instead. +#lvm_share_export_ip = + +# List of IPs to export shares. (list value) +#lvm_share_export_ips = + +# If set, create LVMs with multiple mirrors. Note that this requires +# lvm_mirrors + 2 PVs with available space. (integer value) +#lvm_share_mirrors = 0 + +# Name for the VG that will contain exported shares. (string value) +#lvm_share_volume_group = lvm-shares + +# Specify list of share export helpers. 
(list value) +#lvm_share_helpers = CIFS=manila.share.drivers.helpers.CIFSHelperUserAccess,NFS=manila.share.drivers.helpers.NFSHelper + +# The storage family type used on the storage system; valid values +# include ontap_cluster for using clustered Data ONTAP. (string value) +#netapp_storage_family = ontap_cluster + +# Warning: Failed to format sample for netapp_server_hostname +# unhashable type: 'HostAddress' + +# The TCP port to use for communication with the storage system or +# proxy server. If not specified, Data ONTAP drivers will use 80 for +# HTTP and 443 for HTTPS. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#netapp_server_port = + +# The transport protocol used when communicating with the storage +# system or proxy server. Valid values are http or https. (string +# value) +# Deprecated group/name - [DEFAULT]/netapp_nas_transport_type +#netapp_transport_type = http + +# Administrative user account name used to access the storage system. +# (string value) +# Deprecated group/name - [DEFAULT]/netapp_nas_login +#netapp_login = + +# Password for the administrative user account specified in the +# netapp_login option. (string value) +# Deprecated group/name - [DEFAULT]/netapp_nas_password +#netapp_password = + +# The NFS protocol versions that will be enabled. Supported values +# include nfs3, nfs4.0, nfs4.1. This option only applies when the +# option driver_handles_share_servers is set to True. (list value) +#netapp_enabled_share_protocols = nfs3,nfs4.0 + +# NetApp volume name template. (string value) +# Deprecated group/name - [DEFAULT]/netapp_nas_volume_name_template +#netapp_volume_name_template = share_%(share_id)s + +# Name template to use for new Vserver. (string value) +#netapp_vserver_name_template = os_%s + +# NetApp QoS policy group name template. (string value) +#netapp_qos_policy_group_name_template = qos_share_%(share_id)s + +# Pattern for overriding the selection of network ports on which to +# create Vserver LIFs. 
(string value) +#netapp_port_name_search_pattern = (.*) + +# Logical interface (LIF) name template (string value) +#netapp_lif_name_template = os_%(net_allocation_id)s + +# Pattern for searching available aggregates for provisioning. (string +# value) +#netapp_aggregate_name_search_pattern = (.*) + +# Name of aggregate to create Vserver root volumes on. This option +# only applies when the option driver_handles_share_servers is set to +# True. (string value) +#netapp_root_volume_aggregate = + +# Root volume name. (string value) +# Deprecated group/name - [DEFAULT]/netapp_root_volume_name +#netapp_root_volume = root + +# The percentage of share space set aside as reserve for snapshot +# usage; valid values range from 0 to 90. (integer value) +# Minimum value: 0 +# Maximum value: 90 +#netapp_volume_snapshot_reserve_percent = 5 + +# The maximum time in seconds to wait for existing snapmirror +# transfers to complete before aborting when promoting a replica. +# (integer value) +# Minimum value: 0 +#netapp_snapmirror_quiesce_timeout = 3600 + +# The maximum time in seconds to wait for the completion of a volume +# move operation after the cutover was triggered. (integer value) +# Minimum value: 0 +#netapp_volume_move_cutover_timeout = 3600 + +# Warning: Failed to format sample for nexenta_host +# unhashable type: 'HostAddress' + +# Port to connect to Nexenta REST API server. (integer value) +#nexenta_rest_port = 8457 + +# Number of retries for unsuccessful API calls. (integer value) +#nexenta_retry_count = 6 + +# Use http or https for REST connection (default auto). (string value) +# Allowed values: http, https, auto +#nexenta_rest_protocol = auto + +# User name to connect to Nexenta SA. (string value) +#nexenta_user = admin + +# Password to connect to Nexenta SA. (string value) +#nexenta_password = + +# Volume name on NexentaStor. (string value) +#nexenta_volume = volume1 + +# Pool name on NexentaStor. 
(string value) +#nexenta_pool = pool1 + +# On if share over NFS is enabled. (boolean value) +#nexenta_nfs = true + +# Parent folder on NexentaStor. (string value) +#nexenta_nfs_share = nfs_share + +# Compression value for new ZFS folders. (string value) +# Allowed values: on, off, gzip, gzip-1, gzip-2, gzip-3, gzip-4, gzip-5, gzip-6, gzip-7, gzip-8, gzip-9, lzjb, zle, lz4 +#nexenta_dataset_compression = on + +# Deduplication value for new ZFS folders. (string value) +# Allowed values: on, off, sha256, verify, sha256, verify +#nexenta_dataset_dedupe = off + +# If True shares will not be space guaranteed and overprovisioning +# will be enabled. (boolean value) +#nexenta_thin_provisioning = true + +# Base directory that contains NFS share mount points. (string value) +#nexenta_mount_point_base = $state_path/mnt + +# The URL to manage QNAP Storage. (string value) +#qnap_management_url = + +# Warning: Failed to format sample for qnap_share_ip +# unhashable type: 'HostAddress' + +# Username for QNAP storage. (string value) +#qnap_nas_login = + +# Password for QNAP storage. (string value) +#qnap_nas_password = + +# Pool within which QNAP shares must be created. (string value) +#qnap_poolname = + +# URL of the Quobyte API server (http or https) (string value) +#quobyte_api_url = + +# The X.509 CA file to verify the server cert. (string value) +#quobyte_api_ca = + +# Actually deletes shares (vs. unexport) (boolean value) +#quobyte_delete_shares = false + +# Username for Quobyte API server. (string value) +#quobyte_api_username = admin + +# Password for Quobyte API server (string value) +#quobyte_api_password = quobyte + +# Name of volume configuration used for new shares. (string value) +#quobyte_volume_configuration = BASE + +# Default owning user for new volumes. (string value) +#quobyte_default_volume_user = root + +# Default owning group for new volumes. 
(string value) +#quobyte_default_volume_group = root + +# User in service instance that will be used for authentication. +# (string value) +#service_instance_user = + +# Password for service instance user. (string value) +#service_instance_password = + +# Path to host's private key. (string value) +#path_to_private_key = + +# Maximum time in seconds to wait for creating service instance. +# (integer value) +#max_time_to_build_instance = 300 + +# Name or ID of service instance in Nova to use for share exports. +# Used only when share servers handling is disabled. (string value) +#service_instance_name_or_id = + +# Warning: Failed to format sample for service_net_name_or_ip +# unhashable type: 'HostAddress' + +# Warning: Failed to format sample for tenant_net_name_or_ip +# unhashable type: 'HostAddress' + +# Name of image in Glance, that will be used for service instance +# creation. Only used if driver_handles_share_servers=True. (string +# value) +#service_image_name = manila-service-image + +# Name of service instance. Only used if +# driver_handles_share_servers=True. (string value) +#service_instance_name_template = manila_service_instance_%s + +# Keypair name that will be created and used for service instances. +# Only used if driver_handles_share_servers=True. (string value) +#manila_service_keypair_name = manila-service + +# Path to hosts public key. Only used if +# driver_handles_share_servers=True. (string value) +#path_to_public_key = ~/.ssh/id_rsa.pub + +# Security group name, that will be used for service instance +# creation. Only used if driver_handles_share_servers=True. (string +# value) +#service_instance_security_group = manila-service + +# ID of flavor, that will be used for service instance creation. Only +# used if driver_handles_share_servers=True. (integer value) +#service_instance_flavor_id = 100 + +# Name of manila service network. Used only with Neutron. Only used if +# driver_handles_share_servers=True. 
(string value) +#service_network_name = manila_service_network + +# CIDR of manila service network. Used only with Neutron and if +# driver_handles_share_servers=True. (string value) +#service_network_cidr = 10.254.0.0/16 + +# This mask is used for dividing service network into subnets, IP +# capacity of subnet with this mask directly defines possible amount +# of created service VMs per tenant's subnet. Used only with Neutron +# and if driver_handles_share_servers=True. (integer value) +#service_network_division_mask = 28 + +# Vif driver. Used only with Neutron and if +# driver_handles_share_servers=True. (string value) +#interface_driver = manila.network.linux.interface.OVSInterfaceDriver + +# Attach share server directly to share network. Used only with +# Neutron and if driver_handles_share_servers=True. (boolean value) +#connect_share_server_to_tenant_network = false + +# ID of neutron network used to communicate with admin network, to +# create additional admin export locations on. (string value) +#admin_network_id = + +# ID of neutron subnet used to communicate with admin network, to +# create additional admin export locations on. Related to +# 'admin_network_id'. (string value) +#admin_subnet_id = + +# Warning: Failed to format sample for tegile_nas_server +# unhashable type: 'HostAddress' + +# User name for the Tegile NAS server. (string value) +#tegile_nas_login = + +# Password for the Tegile NAS server. (string value) +#tegile_nas_password = + +# Create shares in this project (string value) +#tegile_default_project = + +# Path to the x509 certificate used for accessing the service +# instance. (string value) +#winrm_cert_pem_path = ~/.ssl/cert.pem + +# Path to the x509 certificate key. (string value) +#winrm_cert_key_pem_path = ~/.ssl/key.pem + +# Use x509 certificates in order to authenticate to the service +# instance. (boolean value) +#winrm_use_cert_based_auth = false + +# WinRM connection timeout. 
(integer value) +#winrm_conn_timeout = 60 + +# WinRM operation timeout. (integer value) +#winrm_operation_timeout = 60 + +# WinRM retry count. (integer value) +#winrm_retry_count = 3 + +# WinRM retry interval in seconds (integer value) +#winrm_retry_interval = 5 + +# Warning: Failed to format sample for zfs_share_export_ip +# unhashable type: 'HostAddress' + +# Warning: Failed to format sample for zfs_service_ip +# unhashable type: 'HostAddress' + +# Specify list of zpools that are allowed to be used by backend. Can +# contain nested datasets. Examples: Without nested dataset: +# 'zpool_name'. With nested dataset: 'zpool_name/nested_dataset_name'. +# Required. (list value) +#zfs_zpool_list = + +# Define here list of options that should be applied for each dataset +# creation if needed. Example: compression=gzip,dedup=off. Note that, +# for secondary replicas option 'readonly' will be set to 'on' and for +# active replicas to 'off' in any way. Also, 'quota' will be equal to +# share size. Optional. (list value) +#zfs_dataset_creation_options = + +# Prefix to be used in each dataset name. Optional. (string value) +#zfs_dataset_name_prefix = manila_share_ + +# Prefix to be used in each dataset snapshot name. Optional. (string +# value) +#zfs_dataset_snapshot_name_prefix = manila_share_snapshot_ + +# Remote ZFS storage hostname that should be used for SSH'ing. +# Optional. (boolean value) +#zfs_use_ssh = false + +# SSH user that will be used in 2 cases: 1) By manila-share service in +# case it is located on different host than its ZFS storage. 2) By +# manila-share services with other ZFS backends that perform +# replication. It is expected that SSH'ing will be key-based, +# passwordless. This user should be passwordless sudoer. Optional. +# (string value) +#zfs_ssh_username = + +# Password for user that is used for SSH'ing ZFS storage host. Not +# used for replication operations. They require passwordless SSH +# access. Optional. 
(string value) +#zfs_ssh_user_password = + +# Path to SSH private key that should be used for SSH'ing ZFS storage +# host. Not used for replication operations. Optional. (string value) +#zfs_ssh_private_key_path = + +# Specify list of share export helpers for ZFS storage. It should look +# like following: +# 'FOO_protocol=foo.FooClass,BAR_protocol=bar.BarClass'. Required. +# (list value) +#zfs_share_helpers = NFS=manila.share.drivers.zfsonlinux.utils.NFSviaZFSHelper + +# Set snapshot prefix for usage in ZFS replication. Required. (string +# value) +#zfs_replica_snapshot_prefix = tmp_snapshot_for_replication_ + +# Set snapshot prefix for usage in ZFS migration. Required. (string +# value) +#zfs_migration_snapshot_prefix = tmp_snapshot_for_share_migration_ + +# Warning: Failed to format sample for zfssa_host +# unhashable type: 'HostAddress' + +# Warning: Failed to format sample for zfssa_data_ip +# unhashable type: 'HostAddress' + +# ZFSSA management authorized username. (string value) +#zfssa_auth_user = + +# ZFSSA management authorized user password. (string value) +#zfssa_auth_password = + +# ZFSSA storage pool name. (string value) +#zfssa_pool = + +# ZFSSA project name. (string value) +#zfssa_project = + +# Controls checksum used for data blocks. (string value) +#zfssa_nas_checksum = fletcher4 + +# Data compression-off, lzjb, gzip-2, gzip, gzip-9. (string value) +#zfssa_nas_compression = off + +# Controls behavior when servicing synchronous writes. (string value) +#zfssa_nas_logbias = latency + +# Location of project in ZFS/SA. (string value) +#zfssa_nas_mountpoint = + +# Controls whether a share quota includes snapshot. (string value) +#zfssa_nas_quota_snap = true + +# Controls whether file ownership can be changed. (string value) +#zfssa_nas_rstchown = true + +# Controls whether the share is scanned for viruses. (string value) +#zfssa_nas_vscan = false + +# REST connection timeout (in seconds). 
(string value) +#zfssa_rest_timeout = + +# Driver policy for share manage. A strict policy checks for a schema +# named manila_managed, and makes sure its value is true. A loose +# policy does not check for the schema. (string value) +# Allowed values: loose, strict +#zfssa_manage_policy = loose + +# Whether to enable pre hooks or not. (boolean value) +#enable_pre_hooks = false + +# Whether to enable post hooks or not. (boolean value) +#enable_post_hooks = false + +# Whether to enable periodic hooks or not. (boolean value) +#enable_periodic_hooks = false + +# Whether to suppress pre hook errors (allow driver perform actions) +# or not. (boolean value) +#suppress_pre_hooks_errors = false + +# Whether to suppress post hook errors (allow driver's results to pass +# through) or not. (boolean value) +#suppress_post_hooks_errors = false + +# Interval in seconds between execution of periodic hooks. Used when +# option 'enable_periodic_hooks' is set to True. Default is 300. +# (floating point value) +#periodic_hooks_interval = 300.0 + +# Driver to use for share creation. (string value) +#share_driver = manila.share.drivers.generic.GenericShareDriver + +# Driver(s) to perform some additional actions before and after share +# driver actions and on a periodic basis. Default is []. (list value) +#hook_drivers = + +# Whether share servers will be deleted on deletion of the last share. +# (boolean value) +#delete_share_server_with_last_share = false + +# If set to True, then manila will deny access and remove all access +# rules on share unmanage. If set to False - nothing will be changed. +# (boolean value) +#unmanage_remove_access_rules = false + +# If set to True, then Manila will delete all share servers which were +# unused more than specified time. If set to False - automatic +# deletion of share servers will be disabled. (boolean value) +#automatic_share_server_cleanup = true + +# Unallocated share servers reclamation time interval (minutes). 
+# Minimum value is 10 minutes, maximum is 60 minutes. The reclamation +# function is run every 10 minutes and delete share servers which were +# unused more than unused_share_server_cleanup_interval option +# defines. This value reflects the shortest time Manila will wait for +# a share server to go unutilized before deleting it. (integer value) +# Minimum value: 10 +# Maximum value: 60 +#unused_share_server_cleanup_interval = 10 + +# This value, specified in seconds, determines how often the share +# manager will poll for the health (replica_state) of each replica +# instance. (integer value) +#replica_state_update_interval = 300 + +# This value, specified in seconds, determines how often the share +# manager will poll the driver to perform the next step of migration +# in the storage backend, for a migrating share. (integer value) +#migration_driver_continue_update_interval = 60 + +# This value, specified in seconds, determines how often the share +# manager will poll the driver to update the share usage size in the +# storage backend, for shares in that backend. (integer value) +#share_usage_size_update_interval = 300 + +# If set to True, share usage size will be polled for in the interval +# specified with "share_usage_size_update_interval". Usage data can be +# consumed by telemetry integration. If telemetry is not configured, +# this option must be set to False. If set to False - gathering share +# usage size will be disabled. (boolean value) +#enable_gathering_share_usage_size = false + +# The full class name of the Volume API class to use. (string value) +#volume_api_class = manila.volume.cinder.API + +# Sets the value of TCP_KEEPALIVE (True/False) for each server socket. +# (boolean value) +#tcp_keepalive = true + +# Sets the value of TCP_KEEPINTVL in seconds for each server socket. +# Not supported on OS X. (integer value) +#tcp_keepalive_interval = + +# Sets the value of TCP_KEEPCNT for each server socket. Not supported +# on OS X. 
(integer value) +#tcp_keepalive_count = + +# If set to true, the logging level will be set to DEBUG instead of +# the default INFO level. (boolean value) +# Note: This option can be changed without restarting. +#debug = false + +# The name of a logging configuration file. This file is appended to +# any existing logging configuration files. For details about logging +# configuration files, see the Python logging module documentation. +# Note that when logging configuration files are used then all logging +# configuration is set in the configuration file and other logging +# configuration options are ignored (for example, +# logging_context_format_string). (string value) +# Note: This option can be changed without restarting. +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append = + +# Defines the format string for %%(asctime)s in log records. Default: +# %(default)s . This option is ignored if log_config_append is set. +# (string value) +#log_date_format = %Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to send logging output to. If no default +# is set, logging will go to stderr as defined by use_stderr. This +# option is ignored if log_config_append is set. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file = + +# (Optional) The base directory used for relative log_file paths. +# This option is ignored if log_config_append is set. (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir = + +# Uses logging handler designed to watch file system. When log file is +# moved or removed this handler will open a new log file with +# specified path instantaneously. It makes sense only if log_file +# option is specified and Linux platform is used. This option is +# ignored if log_config_append is set. (boolean value) +#watch_log_file = false + +# Use syslog for logging. Existing syslog format is DEPRECATED and +# will be changed later to honor RFC5424. This option is ignored if +# log_config_append is set. 
(boolean value) +#use_syslog = false + +# Enable journald for logging. If running in a systemd environment you +# may wish to enable journal support. Doing so will use the journal +# native protocol which includes structured metadata in addition to +# log messages.This option is ignored if log_config_append is set. +# (boolean value) +#use_journal = false + +# Syslog facility to receive log lines. This option is ignored if +# log_config_append is set. (string value) +#syslog_log_facility = LOG_USER + +# Log output to standard error. This option is ignored if +# log_config_append is set. (boolean value) +#use_stderr = false + +# Format string to use for log messages with context. (string value) +#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages when context is undefined. +# (string value) +#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Additional data to append to log message when logging level for the +# message is DEBUG. (string value) +#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. (string +# value) +#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s + +# Defines the format string for %(user_identity)s that is used in +# logging_context_format_string. (string value) +#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s + +# List of package logging levels in logger=LEVEL pairs. This option is +# ignored if log_config_append is set. 
(list value) +#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO + +# Enables or disables publication of error events. (boolean value) +#publish_errors = false + +# The format for an instance that is passed with the log message. +# (string value) +#instance_format = "[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log message. +# (string value) +#instance_uuid_format = "[instance: %(uuid)s] " + +# Interval, number of seconds, of log rate limiting. (integer value) +#rate_limit_interval = 0 + +# Maximum number of logged messages per rate_limit_interval. (integer +# value) +#rate_limit_burst = 0 + +# Log level name used by rate limiting: CRITICAL, ERROR, INFO, +# WARNING, DEBUG or empty string. Logs with level greater or equal to +# rate_limit_except_level are not filtered. An empty string means that +# all levels are filtered. (string value) +#rate_limit_except_level = CRITICAL + +# Enables or disables fatal status of deprecations. (boolean value) +#fatal_deprecations = false + +# +# From oslo.messaging +# + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size = 30 + +# The pool size limit for connections expiration policy (integer +# value) +#conn_pool_min_size = 2 + +# The time-to-live in sec of idle connections in the pool (integer +# value) +#conn_pool_ttl = 1200 + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve to this +# address. (string value) +#rpc_zmq_bind_address = * + +# MatchMaker driver. 
(string value) +# Allowed values: redis, sentinel, dummy +#rpc_zmq_matchmaker = redis + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts = 1 + +# Maximum number of ingress messages to locally buffer per topic. +# Default is unlimited. (integer value) +#rpc_zmq_topic_backlog = + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir = /var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP address. +# Must match "host" option, if running Nova. (string value) +#rpc_zmq_host = localhost + +# Number of seconds to wait before all pending messages will be sent +# after closing a socket. The default value of -1 specifies an +# infinite linger period. The value of 0 specifies no linger period. +# Pending messages shall be discarded immediately when the socket is +# closed. Positive values specify an upper bound for the linger +# period. (integer value) +# Deprecated group/name - [DEFAULT]/rpc_cast_timeout +#zmq_linger = -1 + +# The default number of seconds that poll should wait. Poll raises +# timeout exception when timeout expired. (integer value) +#rpc_poll_timeout = 1 + +# Expiration timeout in seconds of a name service record about +# existing target ( < 0 means no timeout). (integer value) +#zmq_target_expire = 300 + +# Update period in seconds of a name service record about existing +# target. (integer value) +#zmq_target_update = 180 + +# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. +# (boolean value) +#use_pub_sub = false + +# Use ROUTER remote proxy. (boolean value) +#use_router_proxy = false + +# This option makes direct connections dynamic or static. It makes +# sense only with use_router_proxy=False which means to use direct +# connections for direct message types (ignored otherwise). (boolean +# value) +#use_dynamic_connections = false + +# How many additional connections to a host will be made for failover +# reasons. 
This option is actual only in dynamic connections mode. +# (integer value) +#zmq_failover_connections = 2 + +# Minimal port number for random ports range. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#rpc_zmq_min_port = 49153 + +# Maximal port number for random ports range. (integer value) +# Minimum value: 1 +# Maximum value: 65536 +#rpc_zmq_max_port = 65536 + +# Number of retries to find free port number before fail with +# ZMQBindError. (integer value) +#rpc_zmq_bind_port_retries = 100 + +# Default serialization mechanism for serializing/deserializing +# outgoing/incoming messages (string value) +# Allowed values: json, msgpack +#rpc_zmq_serialization = json + +# This option configures round-robin mode in zmq socket. True means +# not keeping a queue when server side disconnects. False means to +# keep queue and messages even if server is disconnected, when the +# server appears we send all accumulated messages to it. (boolean +# value) +#zmq_immediate = true + +# Enable/disable TCP keepalive (KA) mechanism. The default value of -1 +# (or any other negative value) means to skip any overrides and leave +# it to OS default; 0 and 1 (or any other positive value) mean to +# disable and enable the option respectively. (integer value) +#zmq_tcp_keepalive = -1 + +# The duration between two keepalive transmissions in idle condition. +# The unit is platform dependent, for example, seconds in Linux, +# milliseconds in Windows etc. The default value of -1 (or any other +# negative value and 0) means to skip any overrides and leave it to OS +# default. (integer value) +#zmq_tcp_keepalive_idle = -1 + +# The number of retransmissions to be carried out before declaring +# that remote end is not available. The default value of -1 (or any +# other negative value and 0) means to skip any overrides and leave it +# to OS default. 
(integer value) +#zmq_tcp_keepalive_cnt = -1 + +# The duration between two successive keepalive retransmissions, if +# acknowledgement to the previous keepalive transmission is not +# received. The unit is platform dependent, for example, seconds in +# Linux, milliseconds in Windows etc. The default value of -1 (or any +# other negative value and 0) means to skip any overrides and leave it +# to OS default. (integer value) +#zmq_tcp_keepalive_intvl = -1 + +# Maximum number of (green) threads to work concurrently. (integer +# value) +#rpc_thread_pool_size = 100 + +# Expiration timeout in seconds of a sent/received message after which +# it is not tracked anymore by a client/server. (integer value) +#rpc_message_ttl = 300 + +# Wait for message acknowledgements from receivers. This mechanism +# works only via proxy without PUB/SUB. (boolean value) +#rpc_use_acks = false + +# Number of seconds to wait for an ack from a cast/call. After each +# retry attempt this timeout is multiplied by some specified +# multiplier. (integer value) +#rpc_ack_timeout_base = 15 + +# Number to multiply base ack timeout by after each retry attempt. +# (integer value) +#rpc_ack_timeout_multiplier = 2 + +# Default number of message sending attempts in case of any problems +# occurred: positive value N means at most N retries, 0 means no +# retries, None or -1 (or any other negative values) mean to retry +# forever. This option is used only if acknowledgments are enabled. +# (integer value) +#rpc_retry_attempts = 3 + +# List of publisher hosts SubConsumer can subscribe on. This option +# has higher priority then the default publishers list taken from the +# matchmaker. (list value) +#subscribe_on = + +# Size of executor thread pool when executor is threading or eventlet. +# (integer value) +# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size +#executor_thread_pool_size = 64 + +# Seconds to wait for a response from a call. 
(integer value) +#rpc_response_timeout = 60 + +# A URL representing the messaging driver to use and its full +# configuration. (string value) +#transport_url = + +# DEPRECATED: The messaging driver to use, defaults to rabbit. Other +# drivers include amqp and zmq. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#rpc_backend = rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the transport_url +# option. (string value) +#control_exchange = openstack + + +[cinder] + +# +# From manila +# + +# Allow attaching between instances and volumes in different +# availability zones. (boolean value) +# Deprecated group/name - [DEFAULT]/cinder_cross_az_attach +#cross_az_attach = true + +# Location of CA certificates file to use for cinder client requests. +# (string value) +# Deprecated group/name - [DEFAULT]/cinder_ca_certificates_file +#ca_certificates_file = + +# Number of cinderclient retries on failed HTTP calls. (integer value) +# Deprecated group/name - [DEFAULT]/cinder_http_retries +#http_retries = 3 + +# Allow to perform insecure SSL requests to cinder. (boolean value) +# Deprecated group/name - [DEFAULT]/cinder_api_insecure +#api_insecure = false + +# Endpoint type to be used with cinder client calls. (string value) +#endpoint_type = publicURL + +# Region name for connecting to cinder. (string value) +#region_name = + +# Authentication URL (string value) +#auth_url = + +# Authentication type to load (string value) +# Deprecated group/name - [cinder]/auth_plugin +#auth_type = + +# PEM encoded Certificate Authority to use when verifying HTTPs +# connections. (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# Optional domain ID to use with v3 and v2 parameters. 
It will be used
+# for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_id =
+
+# Optional domain name to use with v3 API and v2 parameters. It will
+# be used for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_name =
+
+# Domain ID to scope to (string value)
+#domain_id =
+
+# Domain name to scope to (string value)
+#domain_name =
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# PEM encoded client certificate key file (string value)
+#keyfile =
+
+# User's password (string value)
+#password =
+
+# Domain ID containing project (string value)
+#project_domain_id =
+
+# Domain name containing project (string value)
+#project_domain_name =
+
+# Project ID to scope to (string value)
+# Deprecated group/name - [cinder]/tenant_id
+#project_id =
+
+# Project name to scope to (string value)
+# Deprecated group/name - [cinder]/tenant_name
+#project_name =
+
+# Timeout value for http requests (integer value)
+#timeout =
+
+# Trust ID (string value)
+#trust_id =
+
+# User's domain id (string value)
+#user_domain_id =
+
+# User's domain name (string value)
+#user_domain_name =
+
+# User id (string value)
+#user_id =
+
+# Username (string value)
+# Deprecated group/name - [cinder]/user_name
+#username =
+
+
+[cors]
+
+#
+# From manila
+#
+
+# Indicate whether this resource may be shared with the domain
+# received in the requests "origin" header. Format:
+# "<protocol>://<host>[:<port>]", no trailing slash. Example:
+# https://horizon.example.com (list value)
+#allowed_origin =
+
+# Indicate that the actual request can include user credentials
+# (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to
+# HTTP Simple Headers. (list value)
+#expose_headers =
+
+# Maximum cache age of CORS preflight requests. 
(integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list
+# value)
+#allow_methods = OPTIONS,GET,HEAD,POST,PUT,DELETE,TRACE,PATCH
+
+# Indicate which header field names may be used during the actual
+# request. (list value)
+#allow_headers =
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain
+# received in the requests "origin" header. Format:
+# "<protocol>://<host>[:<port>]", no trailing slash. Example:
+# https://horizon.example.com (list value)
+#allowed_origin =
+
+# Indicate that the actual request can include user credentials
+# (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to
+# HTTP Simple Headers. (list value)
+#expose_headers = X-Auth-Token,X-OpenStack-Request-ID,X-Openstack-Manila-Api-Version,X-OpenStack-Manila-API-Experimental,X-Subject-Token,X-Service-Token
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list
+# value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual
+# request. (list value)
+#allow_headers = X-Auth-Token,X-OpenStack-Request-ID,X-Openstack-Manila-Api-Version,X-OpenStack-Manila-API-Experimental,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database. 
+# (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = + +# The SQLAlchemy connection string to use to connect to the slave +# database. (string value) +#slave_connection = + +# The SQL mode to be used for MySQL sessions. This option, including +# the default, overrides any server-set SQL mode. To use whatever SQL +# mode is set by the server configuration, set this to no value. +# Example: mysql_sql_mode= (string value) +#mysql_sql_mode = TRADITIONAL + +# If True, transparently enables support for handling MySQL Cluster +# (NDB). (boolean value) +#mysql_enable_ndb = false + +# Timeout before idle SQL connections are reaped. (integer value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout = 3600 + +# Minimum number of SQL connections to keep open in a pool. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool. Setting a +# value of 0 indicates no limit. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = 5 + +# Maximum number of database connection retries during startup. Set to +# -1 to specify an infinite retry count. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a SQL connection. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with SQLAlchemy. 
(integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything.
+# (integer value)
+# Minimum value: 0
+# Maximum value: 100
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer
+# value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = 
+
+# Enable the experimental use of database reconnect on connection
+# lost. (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database
+# operation up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries
+# of a database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before
+# error is raised. Set to -1 to specify an infinite retry count.
+# (integer value)
+#db_max_retries = 20
+
+#
+# From oslo.db.concurrency
+#
+
+# Enable the experimental use of thread pooling for all DB API calls
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/dbapi_use_tpool
+#use_tpool = false
+
+
+[healthcheck]
+
+#
+# From manila
+#
+
+# DEPRECATED: The path to respond to healthcheck requests on. (string
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future. 
+#path = /healthcheck + +# Show more detailed information as part of the response (boolean +# value) +#detailed = false + +# Additional backends that can perform health checks and report that +# information back as part of a request. (list value) +#backends = + +# Check the presence of a file to determine if an application is +# running on a port. Used by DisableByFileHealthcheck plugin. (string +# value) +#disable_by_file_path = + +# Check the presence of a file based on a port to determine if an +# application is running on a port. Expects a "port:path" list of +# strings. Used by DisableByFilesPortsHealthcheck plugin. (list value) +#disable_by_file_paths = + + +[keystone_authtoken] + +# +# From keystonemiddleware.auth_token +# + +# Complete "public" Identity API endpoint. This endpoint should not be +# an "admin" endpoint, as it should be accessible by all end users. +# Unauthenticated clients are redirected to this endpoint to +# authenticate. Although this endpoint should ideally be unversioned, +# client support in the wild varies. If you're using a versioned v2 +# endpoint here, then this should *not* be the same endpoint the +# service user utilizes for validating tokens, because normal end +# users may not be able to reach that endpoint. (string value) +#auth_uri = + +# API version of the admin Identity API endpoint. (string value) +#auth_version = + +# Do not handle authorization requests within the middleware, but +# delegate the authorization decision to downstream WSGI components. +# (boolean value) +#delay_auth_decision = false + +# Request timeout value for communicating with Identity API server. +# (integer value) +#http_connect_timeout = + +# How many times are we trying to reconnect when communicating with +# Identity API Server. (integer value) +#http_request_max_retries = 3 + +# Request environment key where the Swift cache object is stored. 
When +# auth_token middleware is deployed with a Swift cache, use this +# option to have the middleware share a caching backend with swift. +# Otherwise, use the ``memcached_servers`` option instead. (string +# value) +#cache = + +# Required if identity server requires client certificate (string +# value) +#certfile = + +# Required if identity server requires client certificate (string +# value) +#keyfile = + +# A PEM encoded Certificate Authority to use when verifying HTTPs +# connections. Defaults to system CAs. (string value) +#cafile = + +# Verify HTTPS connections. (boolean value) +#insecure = false + +# The region in which the identity server can be found. (string value) +#region_name = + +# DEPRECATED: Directory used to cache files related to PKI tokens. +# This option has been deprecated in the Ocata release and will be +# removed in the P release. (string value) +# This option is deprecated for removal since Ocata. +# Its value may be silently ignored in the future. +# Reason: PKI token format is no longer supported. +#signing_dir = + +# Optionally specify a list of memcached server(s) to use for caching. +# If left undefined, tokens will instead be cached in-process. (list +# value) +# Deprecated group/name - [keystone_authtoken]/memcache_servers +#memcached_servers = + +# In order to prevent excessive effort spent validating tokens, the +# middleware caches previously-seen tokens for a configurable duration +# (in seconds). Set to -1 to disable caching completely. (integer +# value) +#token_cache_time = 300 + +# DEPRECATED: Determines the frequency at which the list of revoked +# tokens is retrieved from the Identity service (in seconds). A high +# number of revocation events combined with a low cache duration may +# significantly reduce performance. Only valid for PKI tokens. This +# option has been deprecated in the Ocata release and will be removed +# in the P release. (integer value) +# This option is deprecated for removal since Ocata. 
+# Its value may be silently ignored in the future. +# Reason: PKI token format is no longer supported. +#revocation_cache_time = 10 + +# (Optional) If defined, indicate whether token data should be +# authenticated or authenticated and encrypted. If MAC, token data is +# authenticated (with HMAC) in the cache. If ENCRYPT, token data is +# encrypted and authenticated in the cache. If the value is not one of +# these options or empty, auth_token will raise an exception on +# initialization. (string value) +# Allowed values: None, MAC, ENCRYPT +#memcache_security_strategy = None + +# (Optional, mandatory if memcache_security_strategy is defined) This +# string is used for key derivation. (string value) +#memcache_secret_key = + +# (Optional) Number of seconds memcached server is considered dead +# before it is tried again. (integer value) +#memcache_pool_dead_retry = 300 + +# (Optional) Maximum total number of open connections to every +# memcached server. (integer value) +#memcache_pool_maxsize = 10 + +# (Optional) Socket timeout in seconds for communicating with a +# memcached server. (integer value) +#memcache_pool_socket_timeout = 3 + +# (Optional) Number of seconds a connection to memcached is held +# unused in the pool before it is closed. (integer value) +#memcache_pool_unused_timeout = 60 + +# (Optional) Number of seconds that an operation will wait to get a +# memcached client connection from the pool. (integer value) +#memcache_pool_conn_get_timeout = 10 + +# (Optional) Use the advanced (eventlet safe) memcached client pool. +# The advanced pool will only work under python 2.x. (boolean value) +#memcache_use_advanced_pool = false + +# (Optional) Indicate whether to set the X-Service-Catalog header. If +# False, middleware will not ask for service catalog on token +# validation and will not set the X-Service-Catalog header. (boolean +# value) +#include_service_catalog = true + +# Used to control the use and type of token binding. 
Can be set to: +# "disabled" to not check token binding. "permissive" (default) to +# validate binding information if the bind type is of a form known to +# the server and ignore it if not. "strict" like "permissive" but if +# the bind type is unknown the token will be rejected. "required" any +# form of token binding is needed to be allowed. Finally the name of a +# binding method that must be present in tokens. (string value) +#enforce_token_bind = permissive + +# DEPRECATED: If true, the revocation list will be checked for cached +# tokens. This requires that PKI tokens are configured on the identity +# server. (boolean value) +# This option is deprecated for removal since Ocata. +# Its value may be silently ignored in the future. +# Reason: PKI token format is no longer supported. +#check_revocations_for_cached = false + +# DEPRECATED: Hash algorithms to use for hashing PKI tokens. This may +# be a single algorithm or multiple. The algorithms are those +# supported by Python standard hashlib.new(). The hashes will be tried +# in the order given, so put the preferred one first for performance. +# The result of the first hash will be stored in the cache. This will +# typically be set to multiple values only while migrating from a less +# secure algorithm to a more secure one. Once all the old tokens are +# expired this option should be set to a single value for better +# performance. (list value) +# This option is deprecated for removal since Ocata. +# Its value may be silently ignored in the future. +# Reason: PKI token format is no longer supported. +#hash_algorithms = md5 + +# A choice of roles that must be present in a service token. Service +# tokens are allowed to request that an expired token can be used and +# so this check should tightly control that only actual services +# should be sending this token. Roles here are applied as an ANY check +# so any role in this list must be present. 
For backwards +# compatibility reasons this currently only affects the allow_expired +# check. (list value) +#service_token_roles = service + +# For backwards compatibility reasons we must let valid service tokens +# pass that don't pass the service_token_roles check as valid. Setting +# this true will become the default in a future release and should be +# enabled if possible. (boolean value) +#service_token_roles_required = false + +# Authentication type to load (string value) +# Deprecated group/name - [keystone_authtoken]/auth_plugin +#auth_type = + +# Config Section from which to load plugin specific options (string +# value) +#auth_section = + + +[matchmaker_redis] + +# +# From oslo.messaging +# + +# DEPRECATED: Host to locate redis. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#host = 127.0.0.1 + +# DEPRECATED: Use this port to connect to redis host. (port value) +# Minimum value: 0 +# Maximum value: 65535 +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#port = 6379 + +# DEPRECATED: Password for Redis server (optional). (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#password = + +# DEPRECATED: List of Redis Sentinel hosts (fault tolerance mode), +# e.g., [host:port, host1:port ... ] (list value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#sentinel_hosts = + +# Redis replica set name. (string value) +#sentinel_group_name = oslo-messaging-zeromq + +# Time in ms to wait between connection attempts. (integer value) +#wait_timeout = 2000 + +# Time in ms to wait before the transaction is killed. 
(integer value) +#check_timeout = 20000 + +# Timeout in ms on blocking socket operations. (integer value) +#socket_timeout = 10000 + + +[neutron] + +# +# From manila +# + +# URL for connecting to neutron. (string value) +# Deprecated group/name - [DEFAULT]/neutron_url +#url = http://127.0.0.1:9696 + +# Timeout value for connecting to neutron in seconds. (integer value) +# Deprecated group/name - [DEFAULT]/neutron_url_timeout +#url_timeout = 30 + +# If set, ignore any SSL validation issues. (boolean value) +#api_insecure = false + +# Auth strategy for connecting to neutron in admin context. (string +# value) +#auth_strategy = keystone + +# DEPRECATED: Location of CA certificates file to use for neutron +# client requests. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#ca_certificates_file = + +# Endpoint type to be used with neutron client calls. (string value) +#endpoint_type = publicURL + +# Region name for connecting to neutron in admin context. (string +# value) +#region_name = + +# Authentication URL (string value) +#auth_url = + +# Authentication type to load (string value) +# Deprecated group/name - [neutron]/auth_plugin +#auth_type = + +# PEM encoded Certificate Authority to use when verifying HTTPs +# connections. (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# Optional domain ID to use with v3 and v2 parameters. It will be used +# for both the user and project domain in v3 and ignored in v2 +# authentication. (string value) +#default_domain_id = + +# Optional domain name to use with v3 API and v2 parameters. It will +# be used for both the user and project domain in v3 and ignored in v2 +# authentication. (string value) +#default_domain_name = + +# Domain ID to scope to (string value) +#domain_id = + +# Domain name to scope to (string value) +#domain_name = + +# Verify HTTPS connections. 
(boolean value) +#insecure = false + +# PEM encoded client certificate key file (string value) +#keyfile = + +# User's password (string value) +#password = + +# Domain ID containing project (string value) +#project_domain_id = + +# Domain name containing project (string value) +#project_domain_name = + +# Project ID to scope to (string value) +# Deprecated group/name - [neutron]/tenant_id +#project_id = + +# Project name to scope to (string value) +# Deprecated group/name - [neutron]/tenant_name +#project_name = + +# Timeout value for http requests (integer value) +#timeout = + +# Trust ID (string value) +#trust_id = + +# User's domain id (string value) +#user_domain_id = + +# User's domain name (string value) +#user_domain_name = + +# User id (string value) +#user_id = + +# Username (string value) +# Deprecated group/name - [neutron]/user_name +#username = + + +[nova] + +# +# From manila +# + +# Version of Nova API to be used. (string value) +# Deprecated group/name - [DEFAULT]/nova_api_microversion +#api_microversion = 2.10 + +# Location of CA certificates file to use for nova client requests. +# (string value) +# Deprecated group/name - [DEFAULT]/nova_ca_certificates_file +#ca_certificates_file = + +# Allow to perform insecure SSL requests to nova. (boolean value) +# Deprecated group/name - [DEFAULT]/nova_api_insecure +#api_insecure = false + +# Endpoint type to be used with nova client calls. (string value) +#endpoint_type = publicURL + +# Region name for connecting to nova. (string value) +#region_name = + +# Authentication URL (string value) +#auth_url = + +# Authentication type to load (string value) +# Deprecated group/name - [nova]/auth_plugin +#auth_type = + +# PEM encoded Certificate Authority to use when verifying HTTPs +# connections. (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# Optional domain ID to use with v3 and v2 parameters. 
It will be used +# for both the user and project domain in v3 and ignored in v2 +# authentication. (string value) +#default_domain_id = + +# Optional domain name to use with v3 API and v2 parameters. It will +# be used for both the user and project domain in v3 and ignored in v2 +# authentication. (string value) +#default_domain_name = + +# Domain ID to scope to (string value) +#domain_id = + +# Domain name to scope to (string value) +#domain_name = + +# Verify HTTPS connections. (boolean value) +#insecure = false + +# PEM encoded client certificate key file (string value) +#keyfile = + +# User's password (string value) +#password = + +# Domain ID containing project (string value) +#project_domain_id = + +# Domain name containing project (string value) +#project_domain_name = + +# Project ID to scope to (string value) +# Deprecated group/name - [nova]/tenant_id +#project_id = + +# Project name to scope to (string value) +# Deprecated group/name - [nova]/tenant_name +#project_name = + +# Timeout value for http requests (integer value) +#timeout = + +# Trust ID (string value) +#trust_id = + +# User's domain id (string value) +#user_domain_id = + +# User's domain name (string value) +#user_domain_name = + +# User id (string value) +#user_id = + +# Username (string value) +# Deprecated group/name - [nova]/user_name +#username = + + +[oslo_concurrency] + +# +# From manila +# + +# Enables or disables inter-process locks. (boolean value) +#disable_process_locking = false + +# Directory to use for lock files. For security, the specified +# directory should only be writable by the user running the processes +# that need locking. Defaults to environment variable OSLO_LOCK_PATH. +# If external locks are used, a lock path must be set. (string value) +#lock_path = + + +[oslo_messaging_amqp] + +# +# From oslo.messaging +# + +# Name for the AMQP container. must be globally unique. 
Defaults to a +# generated UUID (string value) +#container_name = + +# Timeout for inactive connections (in seconds) (integer value) +#idle_timeout = 0 + +# Debug: dump AMQP frames to stdout (boolean value) +#trace = false + +# Attempt to connect via SSL. If no other ssl-related parameters are +# given, it will use the system's CA-bundle to verify the server's +# certificate. (boolean value) +#ssl = false + +# CA certificate PEM file used to verify the server's certificate +# (string value) +#ssl_ca_file = + +# Self-identifying certificate PEM file for client authentication +# (string value) +#ssl_cert_file = + +# Private key PEM file used to sign ssl_cert_file certificate +# (optional) (string value) +#ssl_key_file = + +# Password for decrypting ssl_key_file (if encrypted) (string value) +#ssl_key_password = + +# DEPRECATED: Accept clients using either SSL or plain TCP (boolean +# value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Not applicable - not a SSL server +#allow_insecure_clients = false + +# Space separated list of acceptable SASL mechanisms (string value) +#sasl_mechanisms = + +# Path to directory that contains the SASL configuration (string +# value) +#sasl_config_dir = + +# Name of configuration file (without .conf suffix) (string value) +#sasl_config_name = + +# SASL realm to use if no realm present in username (string value) +#sasl_default_realm = + +# DEPRECATED: User name for message broker authentication (string +# value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Should use configuration option transport_url to provide the +# username. +#username = + +# DEPRECATED: Password for message broker authentication (string +# value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Should use configuration option transport_url to provide the +# password. 
+#password = + +# Seconds to pause before attempting to re-connect. (integer value) +# Minimum value: 1 +#connection_retry_interval = 1 + +# Increase the connection_retry_interval by this many seconds after +# each unsuccessful failover attempt. (integer value) +# Minimum value: 0 +#connection_retry_backoff = 2 + +# Maximum limit for connection_retry_interval + +# connection_retry_backoff (integer value) +# Minimum value: 1 +#connection_retry_interval_max = 30 + +# Time to pause between re-connecting an AMQP 1.0 link that failed due +# to a recoverable error. (integer value) +# Minimum value: 1 +#link_retry_delay = 10 + +# The maximum number of attempts to re-send a reply message which +# failed due to a recoverable error. (integer value) +# Minimum value: -1 +#default_reply_retry = 0 + +# The deadline for an rpc reply message delivery. (integer value) +# Minimum value: 5 +#default_reply_timeout = 30 + +# The deadline for an rpc cast or call message delivery. Only used +# when caller does not provide a timeout expiry. (integer value) +# Minimum value: 5 +#default_send_timeout = 30 + +# The deadline for a sent notification message delivery. Only used +# when caller does not provide a timeout expiry. (integer value) +# Minimum value: 5 +#default_notify_timeout = 30 + +# The duration to schedule a purge of idle sender links. Detach link +# after expiry. (integer value) +# Minimum value: 1 +#default_sender_link_timeout = 600 + +# Indicates the addressing mode used by the driver. 
+# Permitted values: +# 'legacy' - use legacy non-routable addressing +# 'routable' - use routable addresses +# 'dynamic' - use legacy addresses if the message bus does not +# support routing otherwise use routable addressing (string value) +#addressing_mode = dynamic + +# address prefix used when sending to a specific server (string value) +#server_request_prefix = exclusive + +# address prefix used when broadcasting to all servers (string value) +#broadcast_prefix = broadcast + +# address prefix when sending to any server in group (string value) +#group_request_prefix = unicast + +# Address prefix for all generated RPC addresses (string value) +#rpc_address_prefix = openstack.org/om/rpc + +# Address prefix for all generated Notification addresses (string +# value) +#notify_address_prefix = openstack.org/om/notify + +# Appended to the address prefix when sending a fanout message. Used +# by the message bus to identify fanout messages. (string value) +#multicast_address = multicast + +# Appended to the address prefix when sending to a particular +# RPC/Notification server. Used by the message bus to identify +# messages sent to a single destination. (string value) +#unicast_address = unicast + +# Appended to the address prefix when sending to a group of consumers. +# Used by the message bus to identify messages that should be +# delivered in a round-robin fashion across consumers. (string value) +#anycast_address = anycast + +# Exchange name used in notification addresses. +# Exchange name resolution precedence: +# Target.exchange if set +# else default_notification_exchange if set +# else control_exchange if set +# else 'notify' (string value) +#default_notification_exchange = + +# Exchange name used in RPC addresses. +# Exchange name resolution precedence: +# Target.exchange if set +# else default_rpc_exchange if set +# else control_exchange if set +# else 'rpc' (string value) +#default_rpc_exchange = + +# Window size for incoming RPC Reply messages. 
(integer value) +# Minimum value: 1 +#reply_link_credit = 200 + +# Window size for incoming RPC Request messages (integer value) +# Minimum value: 1 +#rpc_server_credit = 100 + +# Window size for incoming Notification messages (integer value) +# Minimum value: 1 +#notify_server_credit = 100 + +# Send messages of this type pre-settled. +# Pre-settled messages will not receive acknowledgement +# from the peer. Note well: pre-settled messages may be +# silently discarded if the delivery fails. +# Permitted values: +# 'rpc-call' - send RPC Calls pre-settled +# 'rpc-reply'- send RPC Replies pre-settled +# 'rpc-cast' - Send RPC Casts pre-settled +# 'notify' - Send Notifications pre-settled +# (multi valued) +#pre_settled = rpc-cast +#pre_settled = rpc-reply + + +[oslo_messaging_kafka] + +# +# From oslo.messaging +# + +# DEPRECATED: Default Kafka broker Host (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#kafka_default_host = localhost + +# DEPRECATED: Default Kafka broker Port (port value) +# Minimum value: 0 +# Maximum value: 65535 +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#kafka_default_port = 9092 + +# Max fetch bytes of Kafka consumer (integer value) +#kafka_max_fetch_bytes = 1048576 + +# Default timeout(s) for Kafka consumers (floating point value) +#kafka_consumer_timeout = 1.0 + +# Pool Size for Kafka Consumers (integer value) +#pool_size = 10 + +# The pool size limit for connections expiration policy (integer +# value) +#conn_pool_min_size = 2 + +# The time-to-live in sec of idle connections in the pool (integer +# value) +#conn_pool_ttl = 1200 + +# Group id for Kafka consumer. 
Consumers in one group will coordinate +# message consumption (string value) +#consumer_group = oslo_messaging_consumer + +# Upper bound on the delay for KafkaProducer batching in seconds +# (floating point value) +#producer_batch_timeout = 0.0 + +# Size of batch for the producer async send (integer value) +#producer_batch_size = 16384 + + +[oslo_messaging_notifications] + +# +# From oslo.messaging +# + +# The Drivers(s) to handle sending notifications. Possible values are +# messaging, messagingv2, routing, log, test, noop (multi valued) +# Deprecated group/name - [DEFAULT]/notification_driver +#driver = + +# A URL representing the messaging driver to use for notifications. If +# not set, we fall back to the same configuration used for RPC. +# (string value) +# Deprecated group/name - [DEFAULT]/notification_transport_url +#transport_url = + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +# Deprecated group/name - [DEFAULT]/notification_topics +#topics = notifications + +# The maximum number of attempts to re-send a notification message +# which failed to be delivered due to a recoverable error. 0 - No +# retry, -1 - indefinite (integer value) +#retry = -1 + + +[oslo_messaging_rabbit] + +# +# From oslo.messaging +# + +# Use durable queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_durable_queues +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues = false + +# Auto-delete queues in AMQP. (boolean value) +#amqp_auto_delete = false + +# Enable SSL (boolean value) +#ssl = + +# SSL version to use (valid only if SSL enabled). Valid values are +# TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be +# available on some distributions. (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_version +#ssl_version = + +# SSL key file (valid only if SSL enabled). 
(string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_keyfile +#ssl_key_file = + +# SSL cert file (valid only if SSL enabled). (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_certfile +#ssl_cert_file = + +# SSL certification authority file (valid only if SSL enabled). +# (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_ca_certs +#ssl_ca_file = + +# How long to wait before reconnecting in response to an AMQP consumer +# cancel notification. (floating point value) +#kombu_reconnect_delay = 1.0 + +# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression +# will not be used. This option may not be available in future +# versions. (string value) +#kombu_compression = + +# How long to wait a missing client before abandoning to send it its +# replies. This value should not be longer than rpc_response_timeout. +# (integer value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout +#kombu_missing_consumer_retry_timeout = 60 + +# Determines how the next RabbitMQ node is chosen in case the one we +# are currently connected to becomes unavailable. Takes effect only if +# more than one RabbitMQ node is provided in config. (string value) +# Allowed values: round-robin, shuffle +#kombu_failover_strategy = round-robin + +# DEPRECATED: The RabbitMQ broker address where a single node is used. +# (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#rabbit_host = localhost + +# DEPRECATED: The RabbitMQ broker port where a single node is used. +# (port value) +# Minimum value: 0 +# Maximum value: 65535 +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#rabbit_port = 5672 + +# DEPRECATED: RabbitMQ HA cluster host:port pairs. (list value) +# This option is deprecated for removal. 
+# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#rabbit_hosts = $rabbit_host:$rabbit_port + +# DEPRECATED: The RabbitMQ userid. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#rabbit_userid = guest + +# DEPRECATED: The RabbitMQ password. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#rabbit_password = guest + +# The RabbitMQ login method. (string value) +# Allowed values: PLAIN, AMQPLAIN, RABBIT-CR-DEMO +#rabbit_login_method = AMQPLAIN + +# DEPRECATED: The RabbitMQ virtual host. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#rabbit_virtual_host = / + +# How frequently to retry connecting with RabbitMQ. (integer value) +#rabbit_retry_interval = 1 + +# How long to backoff for between retries when connecting to RabbitMQ. +# (integer value) +#rabbit_retry_backoff = 2 + +# Maximum interval of RabbitMQ connection retries. Default is 30 +# seconds. (integer value) +#rabbit_interval_max = 30 + +# DEPRECATED: Maximum number of RabbitMQ connection retries. Default +# is 0 (infinite retry count). (integer value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#rabbit_max_retries = 0 + +# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, +# queue mirroring is no longer controlled by the x-ha-policy argument +# when declaring a queue. 
If you just want to make sure that all +# queues (except those with auto-generated names) are mirrored across +# all nodes, run: "rabbitmqctl set_policy HA '^(?!amq\.).*' '{"ha- +# mode": "all"}' " (boolean value) +#rabbit_ha_queues = false + +# Positive integer representing duration in seconds for queue TTL +# (x-expires). Queues which are unused for the duration of the TTL are +# automatically deleted. The parameter affects only reply and fanout +# queues. (integer value) +# Minimum value: 1 +#rabbit_transient_queues_ttl = 1800 + +# Specifies the number of messages to prefetch. Setting to zero allows +# unlimited messages. (integer value) +#rabbit_qos_prefetch_count = 0 + +# Number of seconds after which the Rabbit broker is considered down +# if heartbeat's keep-alive fails (0 disable the heartbeat). +# EXPERIMENTAL (integer value) +#heartbeat_timeout_threshold = 60 + +# How often times during the heartbeat_timeout_threshold we check the +# heartbeat. (integer value) +#heartbeat_rate = 2 + +# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake +# (boolean value) +#fake_rabbit = false + +# Maximum number of channels to allow (integer value) +#channel_max = + +# The maximum byte size for an AMQP frame (integer value) +#frame_max = + +# How often to send heartbeats for consumer's connections (integer +# value) +#heartbeat_interval = 3 + +# Arguments passed to ssl.wrap_socket (dict value) +#ssl_options = + +# Set socket timeout in seconds for connection's socket (floating +# point value) +#socket_timeout = 0.25 + +# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating +# point value) +#tcp_user_timeout = 0.25 + +# Set delay for reconnection to some host which has connection error +# (floating point value) +#host_connection_reconnect_delay = 0.25 + +# Connection factory implementation (string value) +# Allowed values: new, single, read_write +#connection_factory = single + +# Maximum number of connections to keep queued. 
(integer value) +#pool_max_size = 30 + +# Maximum number of connections to create above `pool_max_size`. +# (integer value) +#pool_max_overflow = 0 + +# Default number of seconds to wait for a connections to available +# (integer value) +#pool_timeout = 30 + +# Lifetime of a connection (since creation) in seconds or None for no +# recycling. Expired connections are closed on acquire. (integer +# value) +#pool_recycle = 600 + +# Threshold at which inactive (since release) connections are +# considered stale in seconds or None for no staleness. Stale +# connections are closed on acquire. (integer value) +#pool_stale = 60 + +# Default serialization mechanism for serializing/deserializing +# outgoing/incoming messages (string value) +# Allowed values: json, msgpack +#default_serializer_type = json + +# Persist notification messages. (boolean value) +#notification_persistence = false + +# Exchange name for sending notifications (string value) +#default_notification_exchange = ${control_exchange}_notification + +# Max number of not acknowledged message which RabbitMQ can send to +# notification listener. (integer value) +#notification_listener_prefetch_count = 100 + +# Reconnecting retry count in case of connectivity problem during +# sending notification, -1 means infinite retry. (integer value) +#default_notification_retry_attempts = -1 + +# Reconnecting retry delay in case of connectivity problem during +# sending notification message (floating point value) +#notification_retry_delay = 0.25 + +# Time to live for rpc queues without consumers in seconds. (integer +# value) +#rpc_queue_expiration = 60 + +# Exchange name for sending RPC messages (string value) +#default_rpc_exchange = ${control_exchange}_rpc + +# Exchange name for receiving RPC replies (string value) +#rpc_reply_exchange = ${control_exchange}_rpc_reply + +# Max number of not acknowledged message which RabbitMQ can send to +# rpc listener. 
(integer value) +#rpc_listener_prefetch_count = 100 + +# Max number of not acknowledged message which RabbitMQ can send to +# rpc reply listener. (integer value) +#rpc_reply_listener_prefetch_count = 100 + +# Reconnecting retry count in case of connectivity problem during +# sending reply. -1 means infinite retry during rpc_timeout (integer +# value) +#rpc_reply_retry_attempts = -1 + +# Reconnecting retry delay in case of connectivity problem during +# sending reply. (floating point value) +#rpc_reply_retry_delay = 0.25 + +# Reconnecting retry count in case of connectivity problem during +# sending RPC message, -1 means infinite retry. If actual retry +# attempts in not 0 the rpc request could be processed more than one +# time (integer value) +#default_rpc_retry_attempts = -1 + +# Reconnecting retry delay in case of connectivity problem during +# sending RPC message (floating point value) +#rpc_retry_delay = 0.25 + + +[oslo_messaging_zmq] + +# +# From oslo.messaging +# + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve to this +# address. (string value) +#rpc_zmq_bind_address = * + +# MatchMaker driver. (string value) +# Allowed values: redis, sentinel, dummy +#rpc_zmq_matchmaker = redis + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts = 1 + +# Maximum number of ingress messages to locally buffer per topic. +# Default is unlimited. (integer value) +#rpc_zmq_topic_backlog = + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir = /var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP address. +# Must match "host" option, if running Nova. (string value) +#rpc_zmq_host = localhost + +# Number of seconds to wait before all pending messages will be sent +# after closing a socket. The default value of -1 specifies an +# infinite linger period. The value of 0 specifies no linger period. 
+# Pending messages shall be discarded immediately when the socket is +# closed. Positive values specify an upper bound for the linger +# period. (integer value) +# Deprecated group/name - [DEFAULT]/rpc_cast_timeout +#zmq_linger = -1 + +# The default number of seconds that poll should wait. Poll raises +# timeout exception when timeout expired. (integer value) +#rpc_poll_timeout = 1 + +# Expiration timeout in seconds of a name service record about +# existing target ( < 0 means no timeout). (integer value) +#zmq_target_expire = 300 + +# Update period in seconds of a name service record about existing +# target. (integer value) +#zmq_target_update = 180 + +# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. +# (boolean value) +#use_pub_sub = false + +# Use ROUTER remote proxy. (boolean value) +#use_router_proxy = false + +# This option makes direct connections dynamic or static. It makes +# sense only with use_router_proxy=False which means to use direct +# connections for direct message types (ignored otherwise). (boolean +# value) +#use_dynamic_connections = false + +# How many additional connections to a host will be made for failover +# reasons. This option is actual only in dynamic connections mode. +# (integer value) +#zmq_failover_connections = 2 + +# Minimal port number for random ports range. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#rpc_zmq_min_port = 49153 + +# Maximal port number for random ports range. (integer value) +# Minimum value: 1 +# Maximum value: 65536 +#rpc_zmq_max_port = 65536 + +# Number of retries to find free port number before fail with +# ZMQBindError. (integer value) +#rpc_zmq_bind_port_retries = 100 + +# Default serialization mechanism for serializing/deserializing +# outgoing/incoming messages (string value) +# Allowed values: json, msgpack +#rpc_zmq_serialization = json + +# This option configures round-robin mode in zmq socket. True means +# not keeping a queue when server side disconnects. 
False means to +# keep queue and messages even if server is disconnected, when the +# server appears we send all accumulated messages to it. (boolean +# value) +#zmq_immediate = true + +# Enable/disable TCP keepalive (KA) mechanism. The default value of -1 +# (or any other negative value) means to skip any overrides and leave +# it to OS default; 0 and 1 (or any other positive value) mean to +# disable and enable the option respectively. (integer value) +#zmq_tcp_keepalive = -1 + +# The duration between two keepalive transmissions in idle condition. +# The unit is platform dependent, for example, seconds in Linux, +# milliseconds in Windows etc. The default value of -1 (or any other +# negative value and 0) means to skip any overrides and leave it to OS +# default. (integer value) +#zmq_tcp_keepalive_idle = -1 + +# The number of retransmissions to be carried out before declaring +# that remote end is not available. The default value of -1 (or any +# other negative value and 0) means to skip any overrides and leave it +# to OS default. (integer value) +#zmq_tcp_keepalive_cnt = -1 + +# The duration between two successive keepalive retransmissions, if +# acknowledgement to the previous keepalive transmission is not +# received. The unit is platform dependent, for example, seconds in +# Linux, milliseconds in Windows etc. The default value of -1 (or any +# other negative value and 0) means to skip any overrides and leave it +# to OS default. (integer value) +#zmq_tcp_keepalive_intvl = -1 + +# Maximum number of (green) threads to work concurrently. (integer +# value) +#rpc_thread_pool_size = 100 + +# Expiration timeout in seconds of a sent/received message after which +# it is not tracked anymore by a client/server. (integer value) +#rpc_message_ttl = 300 + +# Wait for message acknowledgements from receivers. This mechanism +# works only via proxy without PUB/SUB. (boolean value) +#rpc_use_acks = false + +# Number of seconds to wait for an ack from a cast/call. 
After each +# retry attempt this timeout is multiplied by some specified +# multiplier. (integer value) +#rpc_ack_timeout_base = 15 + +# Number to multiply base ack timeout by after each retry attempt. +# (integer value) +#rpc_ack_timeout_multiplier = 2 + +# Default number of message sending attempts in case of any problems +# occurred: positive value N means at most N retries, 0 means no +# retries, None or -1 (or any other negative values) mean to retry +# forever. This option is used only if acknowledgments are enabled. +# (integer value) +#rpc_retry_attempts = 3 + +# List of publisher hosts SubConsumer can subscribe on. This option +# has higher priority then the default publishers list taken from the +# matchmaker. (list value) +#subscribe_on = + + +[oslo_middleware] + +# +# From manila +# + +# The maximum body size for each request, in bytes. (integer value) +# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size +# Deprecated group/name - [DEFAULT]/max_request_body_size +#max_request_body_size = 114688 + +# DEPRECATED: The HTTP Header that will be used to determine what the +# original request protocol scheme was, even if it was hidden by a SSL +# termination proxy. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#secure_proxy_ssl_header = X-Forwarded-Proto + +# Whether the application is behind a proxy or not. This determines if +# the middleware should parse the headers or not. (boolean value) +#enable_proxy_headers_parsing = false + +# +# From oslo.middleware.http_proxy_to_wsgi +# + +# Whether the application is behind a proxy or not. This determines if +# the middleware should parse the headers or not. (boolean value) +#enable_proxy_headers_parsing = false + + +[oslo_policy] + +# +# From manila +# + +# The file that defines policies. (string value) +#policy_file = policy.json + +# Default rule. Enforced when a requested rule is not found. 
(string +# value) +#policy_default_rule = default + +# Directories where policy configuration files are stored. They can be +# relative to any directory in the search path defined by the +# config_dir option, or absolute paths. The file defined by +# policy_file must exist for these directories to be searched. +# Missing or empty directories are ignored. (multi valued) +#policy_dirs = policy.d diff --git a/doc/source/conf.py b/doc/source/conf.py index dda88c0a34..19e61460d6 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -54,8 +54,13 @@ extensions = ['sphinx.ext.autodoc', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'openstackdocstheme', + 'oslo_config.sphinxconfiggen', ] +config_generator_config_file = ( + '../../etc/oslo-config-generator/manila.conf') +sample_config_basename = '_static/manila' + # openstackdocstheme options repository_name = 'openstack/manila' bug_project = 'manila' diff --git a/doc/source/configuration/README.rst b/doc/source/configuration/README.rst deleted file mode 100644 index ebb11a1707..0000000000 --- a/doc/source/configuration/README.rst +++ /dev/null @@ -1,17 +0,0 @@ -================================== -Manila Configuration Documentation -================================== - -Introduction: -------------- - -This directory is intended to hold any documentation that relates to -how to configure Manila. Some of this content will be automatically -generated in upcoming documentation work. At the moment, however, it -is not. Changes to configuration options for Manila or its drivers -need to be put under this directory. - -The full spec for organization of documentation may be seen in the -`OS Manuals Migration Spec -`_. 
- diff --git a/doc/source/configuration/figures/hds_network.jpg b/doc/source/configuration/figures/hds_network.jpg new file mode 100644 index 0000000000..bfd9d2bb7c Binary files /dev/null and b/doc/source/configuration/figures/hds_network.jpg differ diff --git a/doc/source/configuration/figures/hsp_network.png b/doc/source/configuration/figures/hsp_network.png new file mode 100644 index 0000000000..024ddd8c9e Binary files /dev/null and b/doc/source/configuration/figures/hsp_network.png differ diff --git a/doc/source/configuration/figures/openstack-spectrumscale-setup.JPG b/doc/source/configuration/figures/openstack-spectrumscale-setup.JPG new file mode 100644 index 0000000000..500b38c0fa Binary files /dev/null and b/doc/source/configuration/figures/openstack-spectrumscale-setup.JPG differ diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst deleted file mode 100644 index 42c7d84c5a..0000000000 --- a/doc/source/configuration/index.rst +++ /dev/null @@ -1,21 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -Configuration -------------- - -.. toctree:: - :maxdepth: 1 - - README diff --git a/doc/source/configuration/shared-file-systems.rst b/doc/source/configuration/shared-file-systems.rst new file mode 100644 index 0000000000..3ccda2d654 --- /dev/null +++ b/doc/source/configuration/shared-file-systems.rst @@ -0,0 +1,16 @@ +============= +Configuration +============= + +.. 
toctree:: + :maxdepth: 1 + + shared-file-systems/overview + shared-file-systems/api.rst + shared-file-systems/drivers.rst + shared-file-systems/log-files.rst + shared-file-systems/config-options.rst + shared-file-systems/samples/index.rst + +The Shared File Systems service works with many different drivers that +you can configure by using these instructions. diff --git a/doc/source/configuration/shared-file-systems/api.rst b/doc/source/configuration/shared-file-systems/api.rst new file mode 100644 index 0000000000..1aa5a459ed --- /dev/null +++ b/doc/source/configuration/shared-file-systems/api.rst @@ -0,0 +1,11 @@ +===================================== +Shared File Systems API configuration +===================================== + +Configuration options +~~~~~~~~~~~~~~~~~~~~~ + +The following options allow configuration of the APIs that +Shared File Systems service supports. + +.. include:: ../tables/manila-api.inc diff --git a/doc/source/configuration/shared-file-systems/config-options.rst b/doc/source/configuration/shared-file-systems/config-options.rst new file mode 100644 index 0000000000..4e66682ea8 --- /dev/null +++ b/doc/source/configuration/shared-file-systems/config-options.rst @@ -0,0 +1,18 @@ +================== +Additional options +================== + +These options can also be set in the ``manila.conf`` file. + +.. include:: ../tables/manila-ca.inc +.. include:: ../tables/manila-common.inc +.. include:: ../tables/manila-compute.inc +.. include:: ../tables/manila-ganesha.inc +.. include:: ../tables/manila-hnas.inc +.. include:: ../tables/manila-quota.inc +.. include:: ../tables/manila-redis.inc +.. include:: ../tables/manila-san.inc +.. include:: ../tables/manila-scheduler.inc +.. include:: ../tables/manila-share.inc +.. include:: ../tables/manila-tegile.inc +.. 
include:: ../tables/manila-winrm.inc diff --git a/doc/source/configuration/shared-file-systems/drivers.rst b/doc/source/configuration/shared-file-systems/drivers.rst new file mode 100644 index 0000000000..f4c29fd93a --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers.rst @@ -0,0 +1,67 @@ +============= +Share drivers +============= + +.. sort by the drivers by open source software +.. and the drivers for proprietary components + +.. toctree:: + :maxdepth: 1 + + drivers/generic-driver.rst + drivers/cephfs-native-driver.rst + drivers/dell-emc-vmax-driver.rst + drivers/glusterfs-driver.rst + drivers/glusterfs-native-driver.rst + drivers/hdfs-native-driver.rst + drivers/lvm-driver.rst + drivers/zfs-on-linux-driver.rst + drivers/zfssa-manila-driver.rst + drivers/emc-isilon-driver.rst + drivers/emc-vnx-driver.rst + drivers/emc-unity-driver.rst + drivers/hitachi-hnas-driver.rst + drivers/hitachi-hsp-driver.rst + drivers/hpe-3par-share-driver.rst + drivers/huawei-nas-driver.rst + drivers/ibm-spectrumscale-driver.rst + drivers/maprfs-native-driver.rst + drivers/netapp-cluster-mode-driver.rst + drivers/quobyte-driver.rst + + +To use different share drivers for the Shared File Systems service, use the +parameters described in these sections. + +The Shared File Systems service can handle multiple drivers at once. +The configuration for all of them follows a common paradigm: + +#. In the configuration file ``manila.conf``, configure the option + ``enabled_backends`` with the list of names for your configuration. + + For example, if you want to enable two drivers and name them + ``Driver1`` and ``Driver2``: + + .. code-block:: ini + + [Default] + # ... + enabled_backends = Driver1 Driver2 + +#. Configure a separate section for each driver using these + names. You need to define in each section at least the option + ``share_driver`` and assign it the value of your driver. In this + example it is the generic driver: + + .. 
code-block:: ini + + [Driver1] + share_driver = manila.share.drivers.generic.GenericShareDriver + # ... + + [Driver2] + share_driver = manila.share.drivers.generic.GenericShareDriver + # ... + +The share drivers are included in the `Shared File Systems repository +`_. diff --git a/doc/source/configuration/shared-file-systems/drivers/cephfs-native-driver.rst b/doc/source/configuration/shared-file-systems/drivers/cephfs-native-driver.rst new file mode 100644 index 0000000000..a3166e02e5 --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/cephfs-native-driver.rst @@ -0,0 +1,294 @@ +==================== +CephFS Native driver +==================== + +The CephFS Native driver enables the Shared File Systems service to export +shared file systems to guests using the Ceph network protocol. Guests require a +Ceph client in order to mount the file system. + +Access is controlled via Ceph's cephx authentication system. When a user +requests share access for an ID, Ceph creates a corresponding Ceph auth ID and +a secret key, if they do not already exist, and authorizes the ID to access +the share. The client can then mount the share using the ID and the secret +key. + +To learn more about configuring Ceph clients to access the shares created +using this driver, please see the Ceph documentation ( +http://docs.ceph.com/docs/master/cephfs/). If you choose to use the kernel +client rather than the FUSE client, the share size limits set in the +Shared File Systems service may not be obeyed. + +Supported shared file systems and operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports CephFS shares. + +The following operations are supported with CephFS back end: + +- Create a share. + +- Delete a share. + +- Allow share access. + + - ``read-only`` access level is supported. + + - ``read-write`` access level is supported. + + + Note the following limitation for CephFS shares: + + - Only ``cephx`` access type is supported. 
+ +- Deny share access. + +- Create a snapshot. + +- Delete a snapshot. + +- Create a consistency group (CG). + +- Delete a CG. + +- Create a CG snapshot. + +- Delete a CG snapshot. + +Requirements +~~~~~~~~~~~~ + +- Mitaka or later versions of manila. + +- Jewel or later versions of Ceph. + +- A Ceph cluster with a file system configured ( + http://docs.ceph.com/docs/master/cephfs/createfs/) + +- ``ceph-common`` package installed in the servers running the + ``manila-share`` service. + +- Ceph client installed in the guest, preferably the FUSE based client, + ``ceph-fuse``. + +- Network connectivity between your Ceph cluster's public network and the + servers running the ``manila-share`` service. + +- Network connectivity between your Ceph cluster's public network and guests. + +.. important:: A manila share backed onto CephFS is only as good as the + underlying file system. Take care when configuring your Ceph + cluster, and consult the latest guidance on the use of + CephFS in the Ceph documentation ( + http://docs.ceph.com/docs/master/cephfs/). + +Authorize the driver to communicate with Ceph +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Run the following commands to create a Ceph identity for the Shared File +Systems service to use: + +.. code-block:: console + + read -d '' MON_CAPS << EOF + allow r, + allow command "auth del", + allow command "auth caps", + allow command "auth get", + allow command "auth get-or-create" + EOF + + ceph auth get-or-create client.manila -o manila.keyring \ + mds 'allow *' \ + osd 'allow rw' \ + mon "$MON_CAPS" + + +``manila.keyring``, along with your ``ceph.conf`` file, then needs to be placed +on the server running the ``manila-share`` service. + +Enable snapshots in Ceph if you want to use them in the Shared File Systems +service: + +.. 
code-block:: console + + ceph mds set allow_new_snaps true --yes-i-really-mean-it + +In the server running the ``manila-share`` service, you can place the +``ceph.conf`` and ``manila.keyring`` files in the ``/etc/ceph`` directory. Set +the same owner for the ``manila-share`` process and the ``manila.keyring`` +file. Add the following section to the ``ceph.conf`` file. + +.. code-block:: ini + + [client.manila] + client mount uid = 0 + client mount gid = 0 + log file = /opt/stack/logs/ceph-client.manila.log + admin socket = /opt/stack/status/stack/ceph-$name.$pid.asok + keyring = /etc/ceph/manila.keyring + +It is advisable to modify the Ceph client's admin socket file and log file +locations so that they are co-located with the Shared File Systems services' +pid files and log files respectively. + + +Configure CephFS back end in ``manila.conf`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +#. Add CephFS to ``enabled_share_protocols`` (enforced at the Shared File + Systems service's API layer). In this example we leave NFS and CIFS enabled, + although you can remove these if you only use CephFS: + + .. code-block:: ini + + enabled_share_protocols = NFS,CIFS,CEPHFS + +#. Refer to the following table for the list of all the ``cephfs_native`` + driver-specific configuration options. + + .. include:: ../../tables/manila-cephfs.inc + + Create a section to define a CephFS back end: + + .. code-block:: ini + + [cephfs1] + driver_handles_share_servers = False + share_backend_name = CEPHFS1 + share_driver = manila.share.drivers.cephfs.cephfs_native.CephFSNativeDriver + cephfs_conf_path = /etc/ceph/ceph.conf + cephfs_auth_id = manila + cephfs_cluster_name = ceph + cephfs_enable_snapshots = True + + Set ``cephfs_enable_snapshots`` to ``True`` in the section to let the driver + perform snapshot-related operations. Also set the + ``driver-handles-share-servers`` to ``False`` as the driver does not manage + the lifecycle of ``share-servers``. + +#. 
Edit ``enabled_share_backends`` to point to the driver's back-end section + using the section name. In this example we are also including another + back end (``generic1``), you would include whatever other back ends you have + configured. + + .. code-block:: ini + + enabled_share_backends = generic1,cephfs1 + + +Creating shares +~~~~~~~~~~~~~~~ + +The default share type may have ``driver_handles_share_servers`` set to +``True``. Configure a share type suitable for CephFS: + +.. code-block:: console + + manila type-create cephfstype false + + manila type-set cephfstype set share_backend_name='CEPHFS1' + +Then create a share: + +.. code-block:: console + + manila create --share-type cephfstype --name cephshare1 cephfs 1 + +Note the export location of the share: + +.. code-block:: console + + manila share-export-location-list cephshare1 + +The export location of the share contains the Ceph monitor (mon) addresses and +ports, and the path to be mounted. It is of the form, +``{mon ip addr:port}[,{mon ip addr:port}]:{path to be mounted}`` + + +Allowing access to shares +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Allow Ceph auth ID ``alice`` access to the share using ``cephx`` access type. + +.. code-block:: console + + manila access-allow cephshare1 cephx alice + +Note the access status and the secret access key of ``alice``. + +.. code-block:: console + + manila access-list cephshare1 + + +Mounting shares using FUSE client +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Using the secret key of the authorized ID ``alice``, create a keyring file +``alice.keyring``. + +.. code-block:: ini + + [client.alice] + key = AQA8+ANW/4ZWNRAAOtWJMFPEihBA1unFImJczA== + +Using the monitor IP addresses from the share's export location, create a +configuration file, ``ceph.conf``: + +.. 
code-block:: ini + + [client] + client quota = true + mon host = 192.168.1.7:6789, 192.168.1.8:6789, 192.168.1.9:6789 + +Finally, mount the file system, substituting the file names of the keyring and +configuration files you just created, and substituting the path to be mounted +from the share's export location: + +.. code-block:: console + + sudo ceph-fuse ~/mnt \ + --id=alice \ + --conf=./ceph.conf \ + --keyring=./alice.keyring \ + --client-mountpoint=/volumes/_nogroup/4c55ad20-9c55-4a5e-9233-8ac64566b98c + + +Known restrictions +~~~~~~~~~~~~~~~~~~ + +Consider the driver as a building block for supporting multi-tenant workloads +in the future. However, it can be used in private cloud deployments. + +- The guests have direct access to Ceph's public network. + +- The snapshot support of the driver is disabled by default. + ``cephfs_enable_snapshots`` configuration option needs to be set to ``True`` + to allow snapshot operations. + +- Snapshots are read-only. A user can read a snapshot's contents from the + ``.snap/{manila-snapshot-id}_{unknown-id}`` folder within the mounted + share. + +- To restrict share sizes, CephFS uses quotas that are enforced in the client + side. The CephFS clients are relied on to respect quotas. + + +Security +~~~~~~~~ + +- Each share's data is mapped to a distinct Ceph RADOS namespace. A guest is + restricted to access only that particular RADOS namespace. + +- An additional level of resource isolation can be provided by mapping a + share's contents to a separate RADOS pool. This layout would be preferred + only for cloud deployments with a limited number of shares needing strong + resource separation. You can do this by setting a share type specification, + ``cephfs:data_isolated`` for the share type used by the cephfs driver. + + .. 
code-block:: console + + manila type-key cephfstype set cephfs:data_isolated=True + +- Untrusted manila guests pose security risks to the Ceph storage cluster as + they would have direct access to the cluster's public network. diff --git a/doc/source/configuration/shared-file-systems/drivers/dell-emc-vmax-driver.rst b/doc/source/configuration/shared-file-systems/drivers/dell-emc-vmax-driver.rst new file mode 100644 index 0000000000..b0b5ca420a --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/dell-emc-vmax-driver.rst @@ -0,0 +1,309 @@ +==================== +Dell EMC VMAX driver +==================== + +The Dell EMC Shared File Systems service driver framework (EMCShareDriver) +utilizes the Dell EMC storage products to provide the shared file systems +to OpenStack. The Dell EMC driver is a plug-in based driver which is designed +to use different plug-ins to manage different Dell EMC storage products. + +The VMAX plug-in manages the VMAX to provide shared file systems. The EMC +driver framework with the VMAX plug-in is referred to as the VMAX driver +in this document. + +This driver performs the operations on VMAX eNAS by XMLAPI and the file +command line. Each back end manages one Data Mover of VMAX. Multiple +Shared File Systems service back ends need to be configured to manage +multiple Data Movers. + +Requirements +~~~~~~~~~~~~ + +- VMAX eNAS OE for File version 8.1 or higher + +- VMAX Unified or File only + +- The following licenses should be activated on VMAX for File: + + - CIFS + + - NFS + + - SnapSure (for snapshot) + + - ReplicationV2 (for create share from snapshot) + +Supported shared file systems and operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports CIFS and NFS shares. + +The following operations are supported: + +- Create a share. + +- Delete a share. + +- Allow share access. + + Note the following limitations: + + - Only IP access type is supported for NFS. 
+ - Only user access type is supported for CIFS. + +- Deny share access. + +- Create a snapshot. + +- Delete a snapshot. + +- Create a share from a snapshot. + +While the generic driver creates shared file systems based on cinder +volumes attached to nova VMs, the VMAX driver performs similar operations +using the Data Movers on the array. + +Pre-configurations on VMAX +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +#. Enable Unicode on Data Mover. + + The VMAX driver requires that the Unicode is enabled on Data Mover. + + .. warning:: + + After enabling Unicode, you cannot disable it. If there are some + file systems created before Unicode is enabled on the VMAX, + consult the storage administrator before enabling Unicode. + + To check the Unicode status on Data Mover, use the following VMAX eNAS File + commands on the VMAX control station: + + .. code-block:: console + + server_cifs | head + # MOVER_NAME = + + Check the value of I18N mode field. UNICODE mode is shown as + ``I18N mode = UNICODE``. + + To enable the Unicode for Data Mover, use the following command: + + .. code-block:: console + + uc_config -on -mover + # MOVER_NAME = + + Refer to the document Using International Character Sets on VMAX for + File on `EMC support site `_ for more + information. + +#. Enable CIFS service on Data Mover. + + Ensure the CIFS service is enabled on the Data Mover which is going + to be managed by VMAX driver. + + To start the CIFS service, use the following command: + + .. code-block:: console + + server_setup -Protocol cifs -option start [=] + # MOVER_NAME = + # n = + + .. note:: + + If there is 1 GB of memory on the Data Mover, the default is 96 + threads. However, if there is over 1 GB of memory, the default + number of threads is 256. + + To check the CIFS service status, use the following command: + + .. code-block:: console + + server_cifs | head + # MOVER_NAME = + + The command output will show the number of CIFS threads started. + +#. NTP settings on Data Mover. 
+ + VMAX driver only supports CIFS share creation with share network + which has an Active Directory security-service associated. + + Creating CIFS share requires that the time on the Data Mover is in + sync with the Active Directory domain so that the CIFS server can + join the domain. Otherwise, the domain join will fail when creating + a share with this security service. There is a limitation that the + time of the domains used by security-services, even for different + tenants and different share networks, should be in sync. Time + difference should be less than 10 minutes. + + We recommend setting the NTP server to the same public NTP + server on both the Data Mover and domains used in security services + to ensure the time is in sync everywhere. + + Check the date and time on Data Mover with the following command: + + .. code-block:: console + + server_date + # MOVER_NAME = + + Set the NTP server for Data Mover with the following command: + + .. code-block:: console + + server_date timesvc start ntp [ ...] + # MOVER_NAME = + # host = + + .. note:: + + The host must be running the NTP protocol. Only 4 host entries + are allowed. + +#. Configure User Mapping on the Data Mover. + + Before creating CIFS share using VMAX driver, you must select a + method of mapping Windows SIDs to UIDs and GIDs. DELL EMC recommends + using usermapper in single protocol (CIFS) environment which is + enabled on VMAX eNAS by default. + + To check usermapper status, use the following command syntax: + + .. code-block:: console + + server_usermapper + # movername = + + If usermapper does not start, use the following command + to start the usermapper: + + .. code-block:: console + + server_usermapper -enable + # movername = + + For a multiple protocol environment, refer to Configuring VMAX eNAS User + Mapping on `EMC support site `_ for + additional information. + +#. Configure network connection. 
+ + Find the network devices (physical port on NIC) of the Data Mover that + has access to the share network. + + To check the device list, go + to :menuselection:`Unisphere > Settings > Network > Device`. + +Back-end configurations +~~~~~~~~~~~~~~~~~~~~~~~ + +The following parameters need to be configured in the +``/etc/manila/manila.conf`` file for the VMAX driver: + +.. code-block:: ini + + emc_share_backend = vmax + emc_nas_server = + emc_nas_password = + emc_nas_login = + emc_nas_server_container = + emc_nas_pool_names = + share_driver = manila.share.drivers.emc.driver.EMCShareDriver + emc_interface_ports = + +- `emc_share_backend` + The plug-in name. Set it to ``vmax`` for the VMAX driver. + +- `emc_nas_server` + The control station IP address of the VMAX system to be managed. + +- `emc_nas_password` and `emc_nas_login` + The fields that are used to provide credentials to the + VMAX system. Only local users of VMAX File are supported. + +- `emc_nas_server_container` + Name of the Data Mover to serve the share service. + +- `emc_nas_pool_names` + Comma separated list specifying the name of the pools to be used + by this back end. Do not set this option if all storage pools + on the system can be used. + Wild card character is supported. + + Examples: pool_1, pool_*, * + +- `emc_interface_ports` + Comma-separated list specifying the ports (devices) of Data Mover + that can be used for share server interface. Do not set this + option if all ports on the Data Mover can be used. + Wild card character is supported. 
The neutron subnets in + different VLANs that are used to create share networks cannot have + overlapped address spaces. Otherwise, VMAX may have a problem to + communicate with the hosts in the VLANs. To create shares for + different VLANs with same subnet address, use different Data Movers. + +- The **Active Directory** security service is the only supported + security service type and it is required to create CIFS shares. + +- Only one security service can be configured for each share network. + +- The domain name of the ``active_directory`` security + service should be unique even for different tenants. + +- The time on the Data Mover and the Active Directory domains used in + security services should be in sync (time difference should be less + than 10 minutes). We recommend using the same NTP server on both + the Data Mover and Active Directory domains. + +- On eNAS, the snapshot is stored in the SavVols. eNAS system allows the + space used by SavVol to be created and extended until the sum of the + space consumed by all SavVols on the system exceeds the default 20% + of the total space available on the system. If the 20% threshold + value is reached, an alert will be generated on eNAS. Continuing to + create snapshot will cause the old snapshot to be inactivated (and + the snapshot data to be abandoned). The limit percentage value can be + changed manually by storage administrator based on the storage needs. + We recommend the administrator configures the notification on the + SavVol usage. Refer to Using eNAS SnapSure document on `EMC support + site `_ for more information. + +- eNAS has limitations on the overall numbers of Virtual Data Movers, + filesystems, shares, and checkpoints. Virtual Data Mover(VDM) is + created by the eNAS driver on the eNAS to serve as the Shared File + Systems service share server. 
Similarly, the filesystem is created, + mounted, and exported from the VDM over CIFS or NFS protocol to serve + as the Shared File Systems service share. The eNAS checkpoint serves + as the Shared File Systems service share snapshot. Refer to the NAS + Support Matrix document on `EMC support + site `_ for the limitations and configure the + quotas accordingly. + +Driver options +~~~~~~~~~~~~~~ + +Configuration options specific to this driver: + +.. include:: ../../tables/manila-vmax.inc diff --git a/doc/source/configuration/shared-file-systems/drivers/emc-isilon-driver.rst b/doc/source/configuration/shared-file-systems/drivers/emc-isilon-driver.rst new file mode 100644 index 0000000000..01a7edf596 --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/emc-isilon-driver.rst @@ -0,0 +1,80 @@ +================= +EMC Isilon driver +================= + +The EMC Shared File Systems driver framework (EMCShareDriver) utilizes +EMC storage products to provide shared file systems to OpenStack. The +EMC driver is a plug-in based driver which is designed to use different +plug-ins to manage different EMC storage products. + +The Isilon driver is a plug-in for the EMC framework which allows the +Shared File Systems service to interface with an Isilon back end to +provide a shared filesystem. The EMC driver framework with the Isilon +plug-in is referred to as the ``Isilon Driver`` in this document. + +This Isilon Driver interfaces with an Isilon cluster via the REST Isilon +Platform API (PAPI) and the RESTful Access to Namespace API (RAN). + +Requirements +~~~~~~~~~~~~ + +- Isilon cluster running OneFS 7.2 or higher + +Supported shared filesystems and operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports CIFS and NFS shares. + +The following operations are supported: + +- Create a share. + +- Delete a share. + +- Allow share access. + + Note the following limitations: + + - Only IP access type is supported. 
+ - Only read-write access is supported. + +- Deny share access. + +- Create a snapshot. + +- Delete a snapshot. + +- Create a share from a snapshot. + +Back end configuration +~~~~~~~~~~~~~~~~~~~~~~ + +The following parameters need to be configured in the Shared File +Systems service configuration file for the Isilon driver: + +.. code-block:: ini + + share_driver = manila.share.drivers.emc.driver.EMCShareDriver + emc_share_backend = isilon + emc_nas_server = + emc_nas_login = + emc_nas_password = + +Restrictions +~~~~~~~~~~~~ + +The Isilon driver has the following restrictions: + +- Only IP access type is supported for NFS and CIFS. + +- Only FLAT network is supported. + +- Quotas are not yet supported. + +Driver options +~~~~~~~~~~~~~~ + +The following table contains the configuration options specific to the +share driver. + +.. include:: ../../tables/manila-emc.inc diff --git a/doc/source/configuration/shared-file-systems/drivers/emc-unity-driver.rst b/doc/source/configuration/shared-file-systems/drivers/emc-unity-driver.rst new file mode 100644 index 0000000000..9f0fe0137b --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/emc-unity-driver.rst @@ -0,0 +1,265 @@ +================ +EMC Unity driver +================ + +The EMC Shared File Systems service driver framework (EMCShareDriver) +utilizes the EMC storage products to provide the shared file systems to +OpenStack. The EMC driver is a plug-in based driver which is designed to +use different plug-ins to manage different EMC storage products. + +The Unity plug-in manages the Unity system to provide shared filesystems. +The EMC driver framework with the Unity plug-in is referred to as the +Unity driver in this document. + +This driver performs the operations on Unity through RESTful APIs. Each back +end manages one Storage Processor of Unity. Configure multiple Shared File +Systems service back ends to manage multiple Unity systems. 
+ +Requirements +~~~~~~~~~~~~ + +- Unity OE 4.1.x or higher. + +- StorOps 0.4.3 or higher is installed on Manila node. + +- Following licenses are activated on Unity: + + - CIFS/SMB Support + + - Network File System (NFS) + + - Thin Provisioning + + +Supported shared filesystems and operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports CIFS and NFS shares. + +The following operations are supported: + +- Create a share. + +- Delete a share. + +- Allow share access. + +- Deny share access. + +- Create a snapshot. + +- Delete a snapshot. + +- Create a share from a snapshot. + +- Extend a share. + + +Supported network types +~~~~~~~~~~~~~~~~~~~~~~~ + +- ``Flat`` + + This type is fully supported by Unity share driver, however flat networks are + restricted due to the limited number of tenant networks that can be created + from them. + +- ``VLAN`` + + We recommend this type of network topology in Manila. + In most use cases, VLAN is used to isolate the different tenants and provide + an isolated network for each tenant. To support this function, an + administrator needs to set a slot connected with Unity Ethernet port in + ``Trunk`` mode or allow multiple VLANs from the slot. + +- ``VXLAN`` + + Unity native VXLAN is still unavailable. However, with the `HPB + `_ + (Hierarchical Port Binding) in Networking and Shared file system services, + it is possible that Unity co-exists with VXLAN enabled network environment. + +Supported MTU size +~~~~~~~~~~~~~~~~~~ + +Unity currently only supports 1500 and 9000 as the mtu size, the user can +change the above mtu size from Unity Unisphere: + +#. In the Unisphere, go to `Settings`, `Access`, and then `Ethernet`. +#. Double click the ethernet port. +#. Select the `MTU` size from the drop down list. + +The Unity driver will select the port where mtu is equal to the mtu +of share network during share server creation. 
+ + +Supported security services +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Unity share driver provides ``IP`` based authentication method support for +``NFS`` shares and ``user`` based authentication method for ``CIFS`` shares +respectively. For ``CIFS`` share, Microsoft Active Directory is the only +supported security services. + +Pre-configurations +~~~~~~~~~~~~~~~~~~ + +On manila node +-------------- + +Python library ``storops`` is required to run Unity driver. +Install it with the ``pip`` command. +You may need root privilege to install python libraries. + +.. code-block:: console + + pip install storops + +On Unity system +--------------- + +#. Configure system level NTP server. + + Open ``Unisphere`` of your Unity system and navigate to: + + .. code-block:: console + + Unisphere -> Settings -> Management -> System Time and NTP + + Select ``Enable NTP synchronization`` and add your NTP server(s). + + The time on the Unity system and the Active Directory domains + used in security services should be in sync. We recommend + using the same NTP server on both the Unity system and Active + Directory domains. + +#. Configure system level DNS server. + + Open ``Unisphere`` of your Unity system and navigate to: + + .. code-block:: console + + Unisphere -> Settings -> Management -> DNS Server + + Select ``Configure DNS server address manually`` and add your DNS server(s). + + +Back end configurations +~~~~~~~~~~~~~~~~~~~~~~~ + +Following configurations need to be configured in ``/etc/manila/manila.conf`` +for the Unity driver. + +.. code-block:: ini + + share_driver = manila.share.drivers.dell_emc.driver.EMCShareDriver + emc_share_backend = unity + emc_nas_server = + emc_nas_login = + emc_nas_password = + unity_server_meta_pool = + unity_share_data_pools = + unity_ethernet_ports = + driver_handles_share_servers = True + +- ``emc_share_backend`` + The plugin name. Set it to `unity` for the Unity driver. + +- ``emc_nas_server`` + The management IP for Unity. 
+ +- ``emc_nas_login`` + The user with administrator privilege. + +- ``emc_nas_password`` + Password for the user. + +- ``unity_server_meta_pool`` + The name of the pool to persist the meta-data of NAS server. + +- ``unity_share_data_pools`` + Comma separated list specifying the name of the pools to be used + by this back end. Do not set this option if all storage pools + on the system can be used. + Wild card character is supported. + + Examples: + + .. code-block:: ini + + # Only use pool_1 + unity_share_data_pools = pool_1 + # Only use pools whose name starts with pool_ + unity_share_data_pools = pool_* + # Use all pools on Unity + unity_share_data_pools = * + +- ``unity_ethernet_ports`` + Comma separated list specifying the ethernet ports of Unity system + that can be used for share. Do not set this option if all ethernet ports + can be used. + Wild card character is supported. Both the normal ethernet port and link + aggregation port can be used by Unity share driver. + + + Examples: + + .. code-block:: ini + + # Only use spa_eth1 + unity_ethernet_ports = spa_eth1 + # Use port whose name starts with spa_ + unity_ethernet_ports = spa_* + # Use all Link Aggregation ports + unity_ethernet_ports = sp*_la_* + # Use all available ports + unity_ethernet_ports = * + + + .. note:: + + Refer to :ref:`unity_file_io_load_balance` for performance + impact. + +- ``driver_handles_share_servers`` + Unity driver requires this option to be set to ``True``. + + +Restart of ``manila-share`` service is needed for the configuration +changes to take effect. + + +.. _unity_file_io_load_balance: + + +IO Load balance +~~~~~~~~~~~~~~~ + +The Unity driver automatically distributes the file interfaces per storage +processor based on the option ``unity_ethernet_ports``. This balances IO +traffic. The recommended configuration for ``unity_ethernet_ports`` specifies +balanced ports per storage processor. For example: + +.. 
code-block:: ini + + # Use eth2 from both SPs + unity_ethernet_ports = spa_eth2, spb_eth2 + + +Restrictions +~~~~~~~~~~~~ + +The Unity driver has the following restrictions. + +- EMC Unity does not support the same IP in different VLANs. + +- Only Active Directory security service is supported and it is + required to create CIFS shares. + + +Driver options +~~~~~~~~~~~~~~ + +Configuration options specific to this driver: + +.. include:: ../../tables/manila-unity.inc diff --git a/doc/source/configuration/shared-file-systems/drivers/emc-vnx-driver.rst b/doc/source/configuration/shared-file-systems/drivers/emc-vnx-driver.rst new file mode 100644 index 0000000000..e2c496ea6e --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/emc-vnx-driver.rst @@ -0,0 +1,296 @@ +=================== +Dell EMC VNX driver +=================== + +The EMC Shared File Systems service driver framework (EMCShareDriver) +utilizes the EMC storage products to provide the shared file systems to +OpenStack. The EMC driver is a plug-in based driver which is designed to +use different plug-ins to manage different EMC storage products. + +The VNX plug-in is the plug-in which manages the VNX to provide shared +filesystems. The EMC driver framework with the VNX plug-in is referred +to as the VNX driver in this document. + +This driver performs the operations on VNX by XMLAPI and the file +command line. Each back end manages one Data Mover of VNX. Multiple +Shared File Systems service back ends need to be configured to manage +multiple Data Movers. 
+ +Requirements +~~~~~~~~~~~~ + +- VNX OE for File version 7.1 or higher + +- VNX Unified, File only, or Gateway system with a single storage back + end + +- The following licenses should be activated on VNX for File: + + - CIFS + + - NFS + + - SnapSure (for snapshot) + + - ReplicationV2 (for create share from snapshot) + +Supported shared filesystems and operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports CIFS and NFS shares. + +The following operations are supported: + +- Create a share. + +- Delete a share. + +- Allow share access. + + Note the following limitations: + + - Only IP access type is supported for NFS. + - Only user access type is supported for CIFS. + +- Deny share access. + +- Create a snapshot. + +- Delete a snapshot. + +- Create a share from a snapshot. + +While the generic driver creates shared filesystems based on cinder +volumes attached to nova VMs, the VNX driver performs similar operations +using the Data Movers on the array. + +Pre-configurations on VNX +~~~~~~~~~~~~~~~~~~~~~~~~~ + +#. Enable unicode on Data Mover. + + The VNX driver requires that the unicode is enabled on Data Mover. + + .. warning:: + + After enabling Unicode, you cannot disable it. If there are some + filesystems created before Unicode is enabled on the VNX, + consult the storage administrator before enabling Unicode. + + To check the Unicode status on Data Mover, use the following VNX File + command on the VNX control station:: + + server_cifs | head + # mover_name = + + Check the value of I18N mode field. UNICODE mode is shown as + ``I18N mode = UNICODE``. + + To enable the Unicode for Data Mover:: + + uc_config -on -mover + # mover_name = + + Refer to the document Using International Character Sets on VNX for + File on `EMC support site `_ for more + information. + +#. Enable CIFS service on Data Mover. + + Ensure the CIFS service is enabled on the Data Mover which is going + to be managed by VNX driver. 
+ + To start the CIFS service, use the following command:: + + server_setup -Protocol cifs -option start [=] + # mover_name = + # n = + + .. note:: + + If there is 1 GB of memory on the Data Mover, the default is 96 + threads; however, if there is over 1 GB of memory, the default + number of threads is 256. + + To check the CIFS service status, use this command:: + + server_cifs | head + # mover_name = + + The command output will show the number of CIFS threads started. + +#. NTP settings on Data Mover. + + VNX driver only supports CIFS share creation with share network + which has an Active Directory security-service associated. + + Creating CIFS share requires that the time on the Data Mover is in + sync with the Active Directory domain so that the CIFS server can + join the domain. Otherwise, the domain join will fail when creating + share with this security service. There is a limitation that the + time of the domains used by security-services even for different + tenants and different share networks should be in sync. Time + difference should be less than 10 minutes. + + It is recommended to set the NTP server to the same public NTP + server on both the Data Mover and domains used in security services + to ensure the time is in sync everywhere. + + Check the date and time on Data Mover:: + + server_date + # mover_name = + + Set the NTP server for Data Mover:: + + server_date timesvc start ntp [ ...] + # mover_name = + # host = + + .. note:: + + The host must be running the NTP protocol. Only 4 host entries + are allowed. + +#. Configure User Mapping on the Data Mover. + + Before creating CIFS share using VNX driver, you must select a + method of mapping Windows SIDs to UIDs and GIDs. EMC recommends + using usermapper in single protocol (CIFS) environment which is + enabled on VNX by default. 
+ + To check usermapper status, use this command syntax:: + + server_usermapper + # movername = + + If usermapper is not started, the following command can be used + to start the usermapper:: + + server_usermapper -enable + # movername = + + For a multiple protocol environment, refer to Configuring VNX User + Mapping on `EMC support site `_ for + additional information. + +#. Network Connection. + + Find the network devices (physical port on NIC) of Data Mover that + has access to the share network. + + Go to :guilabel:`Unisphere` to check the device list: + :menuselection:`Settings > Network > Settings for File (Unified system + only) > Device`. + + +Back-end configurations +~~~~~~~~~~~~~~~~~~~~~~~ + +The following parameters need to be configured in the +``/etc/manila/manila.conf`` file for the VNX driver: + +.. code-block:: ini + + emc_share_backend = vnx + emc_nas_server = + emc_nas_password = + emc_nas_login = + vnx_server_container = + vnx_share_data_pools = + share_driver = manila.share.drivers.emc.driver.EMCShareDriver + vnx_ethernet_ports = + +- `emc_share_backend` + The plug-in name. Set it to ``vnx`` for the VNX driver. + +- `emc_nas_server` + The control station IP address of the VNX system to be managed. + +- `emc_nas_password` and `emc_nas_login` + The fields that are used to provide credentials to the + VNX system. Only local users of VNX File are supported. + +- `vnx_server_container` + Name of the Data Mover to serve the share service. + +- `vnx_share_data_pools` + Comma separated list specifying the name of the pools to be used + by this back end. Do not set this option if all storage pools + on the system can be used. + Wild card character is supported. + + Examples: pool_1, pool_*, * + +- `vnx_ethernet_ports` + Comma separated list specifying the ports (devices) of Data Mover + that can be used for share server interface. Do not set this + option if all ports on the Data Mover can be used. + Wild card character is supported. 
+ + Examples: spa_eth1, spa_*, * + + +Restart of the ``manila-share`` service is needed for the configuration +changes to take effect. + + +Restrictions +~~~~~~~~~~~~ + +The VNX driver has the following restrictions: + +- Only IP access type is supported for NFS. + +- Only user access type is supported for CIFS. + +- Only FLAT network and VLAN network are supported. + +- VLAN network is supported with limitations. The neutron subnets in + different VLANs that are used to create share networks cannot have + overlapped address spaces. Otherwise, VNX may have a problem to + communicate with the hosts in the VLANs. To create shares for + different VLANs with same subnet address, use different Data Movers. + +- The ``Active Directory`` security service is the only supported + security service type and it is required to create CIFS shares. + +- Only one security service can be configured for each share network. + +- Active Directory domain name of the 'active\_directory' security + service should be unique even for different tenants. + +- The time on Data Mover and the Active Directory domains used in + security services should be in sync (time difference should be less + than 10 minutes). It is recommended to use same NTP server on both + the Data Mover and Active Directory domains. + +- On VNX the snapshot is stored in the SavVols. VNX system allows the + space used by SavVol to be created and extended until the sum of the + space consumed by all SavVols on the system exceeds the default 20% + of the total space available on the system. If the 20% threshold + value is reached, an alert will be generated on VNX. Continuing to + create snapshot will cause the old snapshot to be inactivated (and + the snapshot data to be abandoned). The limit percentage value can be + changed manually by storage administrator based on the storage needs. + Administrator is recommended to configure the notification on the + SavVol usage. 
Refer to Using VNX SnapSure document on `EMC support + site `_ for more information. + +- VNX has limitations on the overall numbers of Virtual Data Movers, + filesystems, shares, checkpoints, etc. Virtual Data Mover(VDM) is + created by the VNX driver on the VNX to serve as the Shared File + Systems service share server. Similarly, filesystem is created, + mounted, and exported from the VDM over CIFS or NFS protocol to serve + as the Shared File Systems service share. The VNX checkpoint serves + as the Shared File Systems service share snapshot. Refer to the NAS + Support Matrix document on `EMC support + site `_ for the limitations and configure the + quotas accordingly. + +Driver options +~~~~~~~~~~~~~~ + +Configuration options specific to this driver: + +.. include:: ../../tables/manila-vnx.inc diff --git a/doc/source/configuration/shared-file-systems/drivers/generic-driver.rst b/doc/source/configuration/shared-file-systems/drivers/generic-driver.rst new file mode 100644 index 0000000000..1604552ac9 --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/generic-driver.rst @@ -0,0 +1,110 @@ +======================================= +Generic approach for share provisioning +======================================= + +The Shared File Systems service can be configured to use Compute VMs +and Block Storage service volumes. There are two modules that handle +them in the Shared File Systems service: + +- The ``service_instance`` module creates VMs in Compute with a + predefined image called ``service image``. This module can be used by + any driver for provisioning of service VMs to be able to separate + share resources among tenants. + +- The ``generic`` module operates with Block Storage service volumes + and VMs created by the ``service_instance`` module, then creates + shared filesystems based on volumes attached to VMs. 
+ +Network configurations +~~~~~~~~~~~~~~~~~~~~~~ + +Each driver can handle networking in its own way, see: +https://wiki.openstack.org/wiki/manila/Networking. + +One of the two possible configurations can be chosen for share provisioning +using the ``service_instance`` module: + +- Service VM has one network interface from a network that is + connected to a public router. For successful creation of a share, + the user network should be connected to a public router, too. + +- Service VM has two network interfaces, the first one is connected to + the service network, the second one is connected directly to the + user's network. + +Requirements for service image +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Linux based distro + +- NFS server + +- Samba server >= 3.2.0, that can be configured by data stored in + registry + +- SSH server + +- Two network interfaces configured to DHCP (see network approaches) + +- ``exportfs`` and ``net conf`` libraries used for share actions + +- The following files will be used, so if their paths differ one + needs to create at least symlinks for them: + + - ``/etc/exports``: permanent file with NFS exports. + + - ``/var/lib/nfs/etab``: temporary file with NFS exports used by + ``exportfs``. + + - ``/etc/fstab``: permanent file with mounted filesystems. + + - ``/etc/mtab``: temporary file with mounted filesystems used by + ``mount``. + +Supported shared filesystems and operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports CIFS and NFS shares. + +The following operations are supported: + +- Create a share. + +- Delete a share. + +- Allow share access. + + Note the following limitations: + + - Only IP access type is supported for NFS and CIFS. + +- Deny share access. + +- Create a snapshot. + +- Delete a snapshot. + +- Create a share from a snapshot. + +- Extend a share. + +- Shrink a share. + + +Known restrictions +~~~~~~~~~~~~~~~~~~ + +- One of nova's configurations only allows 26 shares per server. 
This + limit comes from the maximum number of virtual PCI interfaces that + are used for block device attaching. There are 28 virtual PCI + interfaces, in this configuration, two of them are used for server + needs and the other 26 are used for attaching block devices that + are used for shares. + +Driver options +~~~~~~~~~~~~~~ + +The following table contains the configuration options specific to this +driver. + +.. include:: ../../tables/manila-generic.inc diff --git a/doc/source/configuration/shared-file-systems/drivers/glusterfs-driver.rst b/doc/source/configuration/shared-file-systems/drivers/glusterfs-driver.rst new file mode 100644 index 0000000000..557f6355c1 --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/glusterfs-driver.rst @@ -0,0 +1,92 @@ +================ +GlusterFS driver +================ + +GlusterFS driver uses GlusterFS, an open source distributed file system, +as the storage back end for serving file shares to the Shared File +Systems clients. + +Supported shared filesystems and operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports NFS shares. + +The following operations are supported: + +- Create a share. + +- Delete a share. + +- Allow share access. + + Note the following limitations: + + - Only IP access type is supported + + - Only read-write access is supported. + +- Deny share access. + +Requirements +~~~~~~~~~~~~ + +- Install glusterfs-server package, version >= 3.5.x, on the storage + back end. + +- Install NFS-Ganesha, version >=2.1, if using NFS-Ganesha as the NFS + server for the GlusterFS back end. + +- Install glusterfs and glusterfs-fuse package, version >=3.5.x, on the + Shared File Systems service host. + +- Establish network connection between the Shared File Systems service + host and the storage back end. 
+ +Shared File Systems service driver configuration setting +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The following parameters in the Shared File Systems service's +configuration file ``manila.conf`` need to be set: + +.. code-block:: ini + + share_driver = manila.share.drivers.glusterfs.GlusterfsShareDriver + +If the back-end GlusterFS server runs on the Shared File Systems +service host machine: + +.. code-block:: ini + + glusterfs_target = <glustervolserver>:/<glustervolid> + +If the back-end GlusterFS server runs remotely: + +.. code-block:: ini + + glusterfs_target = <username>@<glustervolserver>:/<glustervolid> + +Known restrictions +~~~~~~~~~~~~~~~~~~ + +- The driver does not support network segmented multi-tenancy model, + but instead works over a flat network, where the tenants share a + network. + +- If NFS Ganesha is the NFS server used by the GlusterFS back end, then + the shares can be accessed by NFSv3 and v4 protocols. However, if + Gluster NFS is used by the GlusterFS back end, then the shares can + only be accessed by NFSv3 protocol. + +- All Shared File Systems service shares, which map to subdirectories + within a GlusterFS volume, are currently created within a single + GlusterFS volume of a GlusterFS storage pool. + +- The driver does not provide read-only access level for shares. + +Driver options +~~~~~~~~~~~~~~ + +The following table contains the configuration options specific to the +share driver. + +..
include:: ../../tables/manila-glusterfs.inc diff --git a/doc/source/configuration/shared-file-systems/drivers/glusterfs-native-driver.rst b/doc/source/configuration/shared-file-systems/drivers/glusterfs-native-driver.rst new file mode 100644 index 0000000000..1b47402e4b --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/glusterfs-native-driver.rst @@ -0,0 +1,123 @@ +======================= +GlusterFS Native driver +======================= + +GlusterFS Native driver uses GlusterFS, an open source distributed file +system, as the storage back end for serving file shares to Shared File +Systems service clients. + +A Shared File Systems service share is a GlusterFS volume. This driver +uses flat-network (share-server-less) model. Instances directly talk +with the GlusterFS back end storage pool. The instances use ``glusterfs`` +protocol to mount the GlusterFS shares. Access to each share is allowed +via TLS Certificates. Only the instance which has the TLS trust +established with the GlusterFS back end can mount and hence use the +share. Currently only ``read-write (rw)`` access is supported. + +Network approach +~~~~~~~~~~~~~~~~ + +L3 connectivity between the storage back end and the host running the +Shared File Systems share service should exist. + +Multi-tenancy model +~~~~~~~~~~~~~~~~~~~ + +The driver does not support network segmented multi-tenancy model. +Instead multi-tenancy is supported using tenant specific TLS +certificates. + +Supported shared filesystems and operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports GlusterFS shares. + +The following operations are supported: + +- Create a share. + +- Delete a share. + +- Allow share access. + + Note the following limitations: + + - Only access by TLS Certificates (``cert`` access type) is supported. + + - Only read-write access is supported. + +- Deny share access. + +- Create a snapshot. + +- Delete a snapshot. 
+ +Requirements +~~~~~~~~~~~~ + +- Install glusterfs-server package, version >= 3.6.x, on the storage + back end. + +- Install glusterfs and glusterfs-fuse package, version >= 3.6.x, on the + Shared File Systems service host. + +- Establish network connection between the Shared File Systems service + host and the storage back end. + +Shared File Systems service driver configuration setting +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The following parameters in the Shared File Systems service's +configuration file need to be set: + +.. code-block:: ini + + share_driver = manila.share.drivers.glusterfs_native.GlusterfsNativeShareDriver + glusterfs_servers = glustervolserver + glusterfs_volume_pattern = manila-share-volume-\d+$ + +The parameters are: + +``glusterfs_servers`` + List of GlusterFS servers which provide volumes that can be used to + create shares. The servers are expected to be of distinct Gluster + clusters, so they should not be Gluster peers. Each server should + be of the form ``[@]``. + + The optional ``@`` part of the server URI indicates + SSH access for cluster management (see related optional + parameters below). If it is not given, direct command line + management is performed (the Shared File Systems service host is + assumed to be part of the GlusterFS cluster the server belongs + to). + +``glusterfs_volume_pattern`` + Regular expression template used to filter GlusterFS volumes for + share creation. The regular expression template can contain the + ``#{size}`` parameter which matches a number and the value will be + interpreted as size of the volume in GB. Examples: + ``manila-share-volume-\d+$``, + ``manila-share-volume-#{size}G-\d+$``; with matching volume names, + respectively: ``manila-share-volume-12``, + ``manila-share-volume-3G-13``. In the latter example, the number + that matches ``#{size}``, which is 3, is an indication that the + size of volume is 3 GB. 
On share creation, the Shared File Systems + service picks volumes at least as large as the requested one. + +When setting up GlusterFS shares, note the following: + +- GlusterFS volumes are not created on demand. A pre-existing set of + GlusterFS volumes should be supplied by the GlusterFS cluster(s), + conforming to the naming convention encoded by + ``glusterfs_volume_pattern``. However, the GlusterFS endpoint is + allowed to extend this set any time, so the Shared File Systems + service and GlusterFS endpoints are expected to communicate volume + supply and demand out-of-band. + +- Certificate setup, also known as trust setup, between instance and + storage back end is out of band of the Shared File Systems service. + +- For the Shared File Systems service to use GlusterFS volumes, the + name of the trashcan directory in GlusterFS volumes must not be + changed from the default. + diff --git a/doc/source/configuration/shared-file-systems/drivers/hdfs-native-driver.rst b/doc/source/configuration/shared-file-systems/drivers/hdfs-native-driver.rst new file mode 100644 index 0000000000..2a3bd0515e --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/hdfs-native-driver.rst @@ -0,0 +1,85 @@ +================== +HDFS native driver +================== + +The HDFS native driver is a plug-in for the Shared File Systems +service. It uses Hadoop distributed file system (HDFS), a distributed +file system designed to hold very large amounts of data, and provide +high-throughput access to the data. + +A Shared File Systems service share in this driver is a subdirectory +in the hdfs root directory. Instances talk directly to the HDFS +storage back end using the ``hdfs`` protocol. Access to each share +is allowed by user based access type, which is aligned with HDFS ACLs +to support access control of multiple users and groups. 
+ +Network configuration +~~~~~~~~~~~~~~~~~~~~~ + +The storage back end and Shared File Systems service hosts should be +in a flat network, otherwise L3 connectivity between them should +exist. + +Supported shared filesystems and operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports HDFS shares. + +The following operations are supported: + +- Create a share. + +- Delete a share. + +- Allow share access. + + Note the following limitations: + + - Only user access type is supported. + +- Deny share access. + +- Create a snapshot. + +- Delete a snapshot. + +- Create a share from a snapshot. + + +Requirements +~~~~~~~~~~~~ + +- Install HDFS package, version >= 2.4.x, on the storage back end. + +- To enable access control, the HDFS file system must have ACLs + enabled. + +- Establish network connection between the Shared File Systems service + host and storage back end. + +Shared File Systems service driver configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To enable the driver, set the ``share_driver`` option in file +``manila.conf`` and add other options as appropriate. + +.. code-block:: ini + + share_driver = manila.share.drivers.hdfs.hdfs_native.HDFSNativeShareDriver + +Known restrictions +~~~~~~~~~~~~~~~~~~ + +- This driver does not support network segmented multi-tenancy model. + Instead multi-tenancy is supported by the tenant specific user + authentication. + +- Only support for single HDFS namenode in Kilo release. + +Driver options +~~~~~~~~~~~~~~ + +The following table contains the configuration options specific to the +share driver. + +.. 
include:: ../../tables/manila-hdfs.inc diff --git a/doc/source/configuration/shared-file-systems/drivers/hitachi-hnas-driver.rst b/doc/source/configuration/shared-file-systems/drivers/hitachi-hnas-driver.rst new file mode 100644 index 0000000000..4d4330f8a3 --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/hitachi-hnas-driver.rst @@ -0,0 +1,482 @@ +========================= +Hitachi NAS (HNAS) driver +========================= + +The HNAS driver provides NFS Shared File Systems to OpenStack. + +Requirements +~~~~~~~~~~~~ + +- Hitachi NAS Platform Models 3080, 3090, 4040, 4060, 4080, and 4100. + +- HNAS/SMU software version is 12.2 or higher. + +- HNAS configuration and management utilities to create a storage pool (span) + and an EVS. + + - GUI (SMU). + + - SSC CLI. + +Supported shared filesystems and operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports NFS and CIFS shares. + +The following operations are supported: + +- Create a share. + +- Delete a share. + +- Allow share access. + +- Deny share access. + +- Create a snapshot. + +- Delete a snapshot. + +- Create a share from a snapshot. + +- Revert a share to a snapshot. + +- Extend a share. + +- Manage a share. + +- Unmanage a share. + +- Shrink a share. + +- Mount snapshots. + +- Allow snapshot access. + +- Deny snapshot access. + +- Manage a snapshot. + +- Unmanage a snapshot. + +Driver options +~~~~~~~~~~~~~~ + +This table contains the configuration options specific to the share driver. + +.. include:: ../../tables/manila-hds_hnas.inc + +Pre-configuration on OpenStack deployment +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +#. Install the OpenStack environment with manila. See the + `OpenStack installation guide `_. + +#. Configure the OpenStack networking so it can reach HNAS Management + interface and HNAS EVS Data interface. + + .. 
note :: + + In the driver mode used by HNAS Driver (DHSS = ``False``), the driver + does not handle network configuration, it is up to the administrator to + configure it. + + * Configure the network of the manila-share node network to reach HNAS + management interface through the admin network. + + * Configure the network of the Compute and Networking nodes to reach HNAS + EVS data interface through the data network. + + * Example of networking architecture: + + .. figure:: ../../figures/hds_network.jpg + :width: 60% + :align: center + :alt: Example networking scenario + + * Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and update the + following settings in their respective tags. In case you use linuxbridge, + update bridge mappings at linuxbridge section: + + .. important :: + + It is mandatory that HNAS management interface is reachable from the + Shared File System node through the admin network, while the selected + EVS data interface is reachable from OpenStack Cloud, such as through + Neutron flat networking. + + .. code-block:: ini + + [ml2] + type_drivers = flat,vlan,vxlan,gre + mechanism_drivers = openvswitch + [ml2_type_flat] + flat_networks = physnet1,physnet2 + [ml2_type_vlan] + network_vlan_ranges = physnet1:1000:1500,physnet2:2000:2500 + [ovs] + bridge_mappings = physnet1:br-ex,physnet2:br-eth1 + + You may have to repeat the last line above in another file on the Compute + node, if it exists it is located in: + ``/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini``. + + * In case openvswitch for neutron agent, run in network node: + + .. code-block:: console + + # ifconfig eth1 0 + # ovs-vsctl add-br br-eth1 + # ovs-vsctl add-port br-eth1 eth1 + # ifconfig eth1 up + + * Restart all neutron processes. + +#. Create the data HNAS network in OpenStack: + + * List the available projects: + + .. 
code-block:: console + + $ openstack project list + + * Create a network to the given project (DEMO), providing the project name, + a name for the network, the name of the physical network over which the + virtual network is implemented, and the type of the physical mechanism by + which the virtual network is implemented: + + .. code-block:: console + + $ openstack network create --project DEMO \ + --provider-network-type flat \ + --provider-physical-network physnet2 hnas_network + + * Optional: List available networks: + + .. code-block:: console + + $ openstack network list + + * Create a subnet to the same project (DEMO), the gateway IP of this subnet, + a name for the subnet, the network name created before, and the CIDR of + subnet: + + .. code-block:: console + + $ openstack subnet create --project DEMO --gateway GATEWAY \ + --subnet-range SUBNET_CIDR --network NETWORK HNAS_SUBNET + + * Optional: List available subnets: + + .. code-block:: console + + $ openstack subnet list + + * Add the subnet interface to a router, providing the router name and + subnet name created before: + + .. code-block:: console + + $ openstack router add subnet SUBNET ROUTER + +Pre-configuration on HNAS +~~~~~~~~~~~~~~~~~~~~~~~~~ + +#. Create a file system on HNAS. See the + `Hitachi HNAS reference `_. + + .. important:: + + Make sure that the filesystem is not created as a replication target. + For more information, refer to the official HNAS administration guide. + +#. Prepare the HNAS EVS network. + + * Create a route in HNAS to the project network: + + .. code-block:: console + + $ console-context --evs route-net-add \ + --gateway + + .. important:: + + Make sure multi-tenancy is enabled and routes are configured + per EVS. + + .. code-block:: console + + $ console-context --evs 3 route-net-add --gateway 192.168.1.1 \ + 10.0.0.0/24 + +#. Configure the CIFS security. + + * Before using CIFS shares with the HNAS driver, make sure to configure a + security service in the back end. 
For details, refer to the `Hitachi HNAS + reference + `_. + +Back end configuration +~~~~~~~~~~~~~~~~~~~~~~ + +#. Configure HNAS driver. + + * Configure HNAS driver according to your environment. This example shows + a minimal HNAS driver configuration: + + .. code-block:: ini + + [DEFAULT] + enabled_share_backends = hnas1 + enabled_share_protocols = NFS,CIFS + + [hnas1] + share_backend_name = HNAS1 + share_driver = manila.share.drivers.hitachi.hnas.driver.HitachiHNASDriver + driver_handles_share_servers = False + hitachi_hnas_ip = 172.24.44.15 + hitachi_hnas_user = supervisor + hitachi_hnas_password = supervisor + hitachi_hnas_evs_id = 1 + hitachi_hnas_evs_ip = 10.0.1.20 + hitachi_hnas_file_system_name = FS-Manila + hitachi_hnas_cifs_snapshot_while_mounted = True + + .. note:: + + The ``hitachi_hnas_cifs_snapshot_while_mounted`` parameter allows snapshots + to be taken while CIFS shares are mounted. This parameter is set to + ``False`` by default, which prevents a snapshot from being taken if the + share is mounted or in use. + +#. Optional. HNAS multi-backend configuration. + + * Update the ``enabled_share_backends`` flag with the names of the back + ends separated by commas. + + * Add a section for every back end according to the example below: + + ..
code-block:: ini + + [DEFAULT] + enabled_share_backends = hnas1,hnas2 + enabled_share_protocols = NFS,CIFS + + [hnas1] + share_backend_name = HNAS1 + share_driver = manila.share.drivers.hitachi.hnas.driver.HitachiHNASDriver + driver_handles_share_servers = False + hitachi_hnas_ip = 172.24.44.15 + hitachi_hnas_user = supervisor + hitachi_hnas_password = supervisor + hitachi_hnas_evs_id = 1 + hitachi_hnas_evs_ip = 10.0.1.20 + hitachi_hnas_file_system_name = FS-Manila1 + hitachi_hnas_cifs_snapshot_while_mounted = True + + [hnas2] + share_backend_name = HNAS2 + share_driver = manila.share.drivers.hitachi.hnas.driver.HitachiHNASDriver + driver_handles_share_servers = False + hitachi_hnas_ip = 172.24.44.15 + hitachi_hnas_user = supervisor + hitachi_hnas_password = supervisor + hitachi_hnas_evs_id = 1 + hitachi_hnas_evs_ip = 10.0.1.20 + hitachi_hnas_file_system_name = FS-Manila2 + hitachi_hnas_cifs_snapshot_while_mounted = True + +#. Disable DHSS for HNAS share type configuration: + + .. note:: + + Shared File Systems requires that the share type includes the + ``driver_handles_share_servers`` extra-spec. This ensures that the share + will be created on a back end that supports the requested + ``driver_handles_share_servers`` capability. + + .. code-block:: console + + $ manila type-create hitachi False + +#. Optional: Add extra-specs for enabling HNAS-supported features: + + * These commands will enable various snapshot-related features that are + supported in HNAS. + + .. code-block:: console + + $ manila type-key hitachi set snapshot_support=True + $ manila type-key hitachi set mount_snapshot_support=True + $ manila type-key hitachi set revert_to_snapshot_support=True + $ manila type-key hitachi set create_share_from_snapshot_support=True + + * To specify which HNAS back end will be created by the share, in case of + multiple back end setups, add an extra-spec for each share-type to match + a specific back end. 
Therefore, it is possible to specify which back end + the Shared File System service will use when creating a share. + + .. code-block:: console + + $ manila type-key hitachi set share_backend_name=hnas1 + $ manila type-key hitachi2 set share_backend_name=hnas2 + +#. Restart all Shared File Systems services (``manila-share``, + ``manila-scheduler`` and ``manila-api``). + +Share migration +~~~~~~~~~~~~~~~ + +Extra configuration is needed for allowing shares to be migrated from or to +HNAS. In the OpenStack deployment, the manila-share node needs an additional +connection to the EVS data interface. Furthermore, make sure to add +``hitachi_hnas_admin_network_ip`` to the configuration. This should match the +value of ``data_node_access_ip``. For more in-depth documentation, +refer to the `share migration documents +`_ + +Manage and unmanage shares +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Shared File Systems has the ability to manage and unmanage shares. If there is +a share in the storage and it is not in OpenStack, you can manage that share +and use it as a Shared File Systems share. Administrators have to make sure the +exports are under the ``/shares`` folder beforehand. HNAS drivers use +virtual-volumes (V-VOL) to create shares. Only V-VOL shares can be used by the +driver, and V-VOLs must have a quota limit. If the NFS export is an ordinary FS +export, it is not possible to use it in Shared File Systems. The unmanage +operation only unlinks the share from Shared File Systems, all data is +preserved. Both manage and unmanage operations are non-disruptive by default, +until access rules are modified. + +To **manage** a share, use: + +.. 
code-block:: console + + $ manila manage [--name <name>] [--description <description>] + [--share_type <share_type>] + [--driver_options [<key=value> [<key=value> ...]]] + [--public] + <service_host> <protocol> <export_path> + +Where: + ++--------------------+------------------------------------------------------+ +| **Parameter** | **Description** | ++====================+======================================================+ +| | Manila host, back end and share name. For example, | +| ``service_host`` | ``ubuntu@hitachi1#hsp1``. The available hosts can | +| | be listed with the command: ``manila pool-list`` | +| | (admin only). | ++--------------------+------------------------------------------------------+ +| ``protocol`` | Protocol of share to manage, such as NFS or CIFS. | ++--------------------+------------------------------------------------------+ +| ``export_path`` | Share export path. | +| | For NFS: ``10.0.0.1:/shares/share_name`` | +| | | +| | For CIFS: ``\\10.0.0.1\share_name`` | ++--------------------+------------------------------------------------------+ + +.. note:: + For NFS exports, ``export_path`` **must** include ``/shares/`` after the + target address. Trying to reference the share name directly or under another + path will fail. + +.. note:: + For CIFS exports, although the shares will be created under the ``/shares/`` + folder in the back end, only the share name is needed in the export path. It + should also be noted that the backslash ``\`` character has to be escaped + when entered in Linux terminals. + +For additional details, refer to ``manila help manage`` or the +`OpenStack Shared File Systems documentation +`_. + +To **unmanage** a share, use: + +.. code-block:: console + + $ manila unmanage <share> + +Where: + ++------------------+---------------------------------------------------------+ +| **Parameter** | **Description** | ++==================+=========================================================+ +| ``share`` | ID or name of the share to be unmanaged. A list of | +| | shares can be fetched with ``manila list``.
| ++------------------+---------------------------------------------------------+ + +Manage and unmanage snapshots +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The Shared File Systems service also has the ability to manage share +snapshots. Existing HNAS snapshots can be managed, as long as the snapshot +directory is located in ``/snapshots/share_ID``. New snapshots created through +the Shared File Systems service are also created according to this specific +folder structure. + +To **manage** a snapshot, use: + +.. code-block:: console + + $ manila snapshot-manage [--name ] [--description ] + [--driver_options [ [ ...]]] + + +Where: + ++------------------------+-------------------------------------------------+ +| **Parameter** | **Description** | ++========================+=================================================+ +| ``share`` | ID or name of the share to be managed. A list | +| | of shares can be fetched with ``manila list``. | ++------------------------+-------------------------------------------------+ +| ``provider_location`` | Location of the snapshot on the back end, such | +| | as ``/snapshots/share_ID/snapshot_ID``. | ++------------------------+-------------------------------------------------+ +| ``--driver_options`` | Driver-related configuration, passed such as | +| | ``size=10``. | ++------------------------+-------------------------------------------------+ + +.. note:: + The mandatory ``provider_location`` parameter uses the same syntax for both + NFS and CIFS shares. This is only the case for snapshot management. + +.. note:: + The ``--driver_options`` parameter ``size`` is **required** for the HNAS + driver. Administrators need to know the size of the to-be-managed + snapshot beforehand. + +.. note:: + If the ``mount_snapshot_support=True`` extra-spec is set in the share type, + the HNAS driver will automatically create an export when managing a snapshot + if one does not already exist. + +To **unmanage** a snapshot, use: + +.. 
code-block:: console + + $ manila snapshot-unmanage <snapshot> + +Where: + ++---------------+--------------------------------+ +| **Parameter** | **Description** | ++===============+================================+ +| ``snapshot`` | Name or ID of the snapshot(s). | ++---------------+--------------------------------+ + +Additional notes +~~~~~~~~~~~~~~~~ + +* HNAS has some restrictions about the number of EVSs, filesystems, + virtual-volumes, and simultaneous SSC connections. Check the manual + specification for your system. +* Shares and snapshots are thin provisioned. Only the real used space in + HNAS is reported to the Shared File Systems service. Also, a snapshot does + not initially take any space in HNAS; it only stores the difference between + the share and the snapshot, so it grows when share data is changed. +* Administrators should manage the project's quota + (:command:`manila quota-update`) to control the back end usage. +* Shares will need to be remounted after a revert-to-snapshot operation. diff --git a/doc/source/configuration/shared-file-systems/drivers/hitachi-hsp-driver.rst b/doc/source/configuration/shared-file-systems/drivers/hitachi-hsp-driver.rst new file mode 100644 index 0000000000..93941d0392 --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/hitachi-hsp-driver.rst @@ -0,0 +1,212 @@ +=================================================================== +Hitachi Hyper Scale-Out Platform File Services Driver for OpenStack +=================================================================== + + +The Hitachi Hyper Scale-Out Platform File Services Driver for OpenStack +provides the management of file shares, supporting NFS shares with IP based +rules to control access. It has a layer that handles the complexity of the +protocol used to communicate to Hitachi Hyper Scale-Out Platform via a +RESTful API, formatting and sending requests to the backend. + + +Requirements +~~~~~~~~~~~~ + +- Hitachi Hyper Scale-Out Platform (HSP) version 1.1.
+ +- HSP user with ``file-system-full-access`` role. + +- Established network connection between the HSP interface and OpenStack + nodes. + +Supported shared filesystems and operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports NFS shares. + +The following operations are supported: + +- Create a share. + +- Delete a share. + +- Extend a share. + +- Shrink a share. + +- Allow share access. + +- Deny share access. + +- Manage a share. + +- Unmanage a share. + +.. note:: + + - Only ``IP`` access type is supported + - Both ``RW`` and ``RO`` access levels supported + + +Known restrictions +~~~~~~~~~~~~~~~~~~ + +- The Hitachi HSP allows only 1024 virtual file systems per cluster. This + determines the limit of shares the driver can provide. + +- The Hitachi HSP file systems must have at least 128 GB. This means that + all shares created by Shared File Systems service should have 128 GB or + more. + + .. note:: + The driver has an internal filter function that accepts only requests for + shares size greater than or equal to 128 GB, otherwise the request will + fail or be redirected to another available storage backend. + + +Driver options +~~~~~~~~~~~~~~ + +The following table contains the configuration options specific to the share +driver. + +.. include:: ../../tables/manila-hds_hsp.inc + +Network approach +~~~~~~~~~~~~~~~~ + +.. note:: + + In the driver mode used by HSP Driver (DHSS = ``False``), the driver does + not handle network configuration, it is up to the administrator to + configure it. + +* Configure the network of the manila-share, Compute and Networking nodes to + reach HSP interface. For this, your provider network should be capable of + reaching HSP Cluster-Virtual-IP. These connections are mandatory so nova + instances are capable of accessing shares provided by the backend. + +* The following image represents a valid scenario: + +.. image:: ../../figures/hsp_network.png + :width: 60% + +.. 
note:: + + To HSP, the Virtual IP is the address through which clients access shares + and the Shared File Systems service sends commands to the management + interface. + This IP can be checked in HSP using its CLI: + + .. code-block:: console + + $ hspadm ip-address list + +Back end configuration +~~~~~~~~~~~~~~~~~~~~~~ + +#. Configure HSP driver according to your environment. This example + shows a valid HSP driver configuration: + + .. code-block:: ini + + [DEFAULT] + # ... + enabled_share_backends = hsp1 + enabled_share_protocols = NFS + # ... + + [hsp1] + share_backend_name = HITACHI1 + share_driver = manila.share.drivers.hitachi.hsp.driver.HitachiHSPDriver + driver_handles_share_servers = False + hitachi_hsp_host = 172.24.47.190 + hitachi_hsp_username = admin + hitachi_hsp_password = admin_password + +#. Configure HSP share type. + + .. note:: + + Shared File Systems service requires that the share type includes the + ``driver_handles_share_servers`` extra-spec. This ensures that the + share will be created on a backend that supports the requested + ``driver_handles_share_servers`` capability. Also, + ``snapshot_support`` extra-spec should be provided if its value + differs from the default value (``True``), as this driver version + that currently does not support snapshot operations. For this + driver both extra-specs must be set to ``False``. + + .. code-block:: console + + $ manila type-create --snapshot_support False hsp False + +#. Restart all Shared File Systems services (``manila-share``, + ``manila-scheduler`` and ``manila-api``). + + +Manage and unmanage shares +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The Shared File Systems service has the ability to manage and unmanage shares. +If there is a share in the storage and it is not in OpenStack, you can manage +that share and use it as a Shared File Systems share. Previous access rules +are not imported by manila. The unmanage operation only unlinks the share from +OpenStack, preserving all data in the share. 
In order to manage a HSP share, it must adhere to the following rules: + +- File system and share name must not contain spaces. + +- Share name must not contain backslashes (``\\``). + +To **manage** a share use: + +.. code-block:: console + + $ manila manage [--name <name>] [--description <description>] + [--share_type <share_type>] [--driver_options [<key=value> + [<key=value> ...]]] <service_host> <protocol> <export_path> + +Where: + ++--------------------+------------------------------------------------------+ +| **Parameter** | **Description** | ++====================+======================================================+ +| | Manila host, backend and share name. For example, | +| ``service_host`` | ``ubuntu@hitachi1#hsp1``. The available hosts can | +| | be listed with the command: ``manila pool-list`` | +| | (admin only). | ++--------------------+------------------------------------------------------+ +| ``protocol`` | Must be **NFS**, the only supported protocol in this | +| | driver version. | ++--------------------+------------------------------------------------------+ +| ``export_path`` | The Hitachi Hyper Scale-Out Platform export path of | +| | the share, for example: | +| | ``172.24.47.190:/some_share_name`` | ++--------------------+------------------------------------------------------+ + +To **unmanage** a share use: + +.. code-block:: console + + $ manila unmanage <share> + +Where: + ++------------------+---------------------------------------------------------+ +| **Parameter** | **Description** | ++==================+=========================================================+ +| ``share`` | ID or name of the share to be unmanaged. This list can | +| | be fetched with: ``manila list``. | ++------------------+---------------------------------------------------------+ + + +Additional notes +~~~~~~~~~~~~~~~~ + +- Shares are thin provisioned. Only the real used space in HSP is + reported to manila. +- Administrators should manage the tenant’s quota (``manila quota-update``) + to control the backend usage.
diff --git a/doc/source/configuration/shared-file-systems/drivers/hpe-3par-share-driver.rst b/doc/source/configuration/shared-file-systems/drivers/hpe-3par-share-driver.rst new file mode 100644 index 0000000000..bb0d39a2cb --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/hpe-3par-share-driver.rst @@ -0,0 +1,643 @@ +=============== +HPE 3PAR driver +=============== + +The HPE 3PAR driver provides NFS and CIFS shared file systems to +OpenStack using HPE 3PAR's File Persona capabilities. + +HPE 3PAR File Persona Software Suite concepts and terminology +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The software suite comprises the following managed objects: + +- File Provisioning Groups (FPGs) + +- Virtual File Servers (VFSs) + +- File Stores + +- File Shares + +The File Persona Software Suite is built upon the resilient mesh-active +architecture of HPE 3PAR StoreServ and benefits from HPE 3PAR storage +foundation of wide-striped logical disks and autonomic +``Common Provisioning Groups (CPGs)``. A CPG can be shared between file and +block to create the File Shares or the logical unit numbers (LUNs) to +provide true convergence. + +``A File Provisioning Group (FPG)`` is an instance of the HPE intellectual +property Adaptive File System. It controls how files are stored and retrieved. +Each FPG is transparently constructed from one or multiple +Virtual Volumes (VVs) and is the unit for replication and disaster recovery +for File Persona Software Suite. There are up to 16 FPGs supported on a +node pair. + +``A Virtual File Server (VFS)`` is conceptually like a server. As such, it +presents virtual IP addresses to clients, participates in user authentication +services, and can have properties for such things as user/group quota +management and antivirus policies. Up to 16 VFSs are supported on a node pair, +one per FPG. 
+ +``File Stores`` are the slice of a VFS and FPG at which snapshots are taken, +capacity quota management can be performed, and antivirus scan service +policies customized. There are up to 256 File Stores supported on a node pair, +16 File Stores per VFS. + +``File Shares`` are what provide data access to clients via SMB, NFS, and the +Object Access API, subject to the share permissions applied to them. Multiple +File Shares can be created for a File Store and at different directory levels +within a File Store. + +Supported shared filesystems +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The driver supports CIFS and NFS shares. + +Operations supported +~~~~~~~~~~~~~~~~~~~~ +- Create a share. + + – Share is not accessible until access rules allow access. + +- Delete a share. + +- Allow share access. + + Note the following limitations: + + – IP access rules are required for NFS share access. + + – User access rules are not allowed for NFS shares. + + – User access rules are required for SMB share access. + + – User access requires a File Persona local user for SMB shares. + + – Shares are read/write (and subject to ACLs). + +- Deny share access. + +- Create a snapshot. + +- Delete a snapshot. + +- Create a share from a snapshot. + +- Extend a share. + +- Shrink a share. + +- Share networks. + +HPE 3PAR File Persona driver can be configured to work with or without +share networks. When using share networks, the HPE 3PAR +driver allocates an FSIP on the back end FPG (VFS) to match the share +network's subnet and segmentation ID. Security groups associated +with share networks are ignored. + +Operations not supported +~~~~~~~~~~~~~~~~~~~~~~~~ + +- Manage and unmanage + +- Manila Experimental APIs (consistency groups, replication, and migration) + were added in Mitaka but have not yet been implemented by the HPE 3PAR + File Persona driver. + +Requirements +~~~~~~~~~~~~ + +On the OpenStack host running the Manila share service: + +- python-3parclient version 4.2.0 or newer from PyPI. 
+
+On the HPE 3PAR array:
+
+- HPE 3PAR Operating System software version 3.2.1 MU3 or higher.
+
+- A license that enables the File Persona feature.
+
+- The array class and hardware configuration must support File Persona.
+
+Pre-configuration on the HPE 3PAR StoreServ
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The following HPE 3PAR CLI commands show how to set up the HPE 3PAR StoreServ
+to use File Persona with OpenStack Manila. HPE 3PAR File Persona must be
+licensed, initialized, and started on the HPE 3PAR storage.
+
+.. code-block:: console
+
+   cli% startfs 0:2:1 1:2:1
+   cli% setfs nodeip -ipaddress 10.10.10.11 -subnet 255.255.240.0 0
+   cli% setfs nodeip -ipaddress 10.10.10.12 -subnet 255.255.240.0 1
+   cli% setfs dns 192.168.8.80,127.127.5.50 foo.com,bar.com
+   cli% setfs gw 10.10.10.10
+
+- A File Provisioning Group (FPG) must be created for use with the
+  Shared File Systems service.
+
+  .. code-block:: console
+
+     cli% createfpg examplecpg examplefpg 18T
+
+- A Virtual File Server (VFS) must be created on the FPG.
+
+- The VFS must be configured with an appropriate share export IP
+  address.
+
+  .. code-block:: console
+
+     cli% createvfs -fpg examplefpg 10.10.10.101 255.255.0.0 examplevfs
+
+- A local user in the Administrators group is needed for CIFS (SMB) shares.
+
+  .. code-block:: console
+
+     cli% createfsgroup fsusers
+     cli% createfsuser -passwd <password> -enable true -grplist
+     Users,Administrators -primarygroup fsusers fsadmin
+
+- The WSAPI with HTTP and/or HTTPS must be enabled and started.
+
+  .. code-block:: console
+
+     cli% setwsapi -https enable
+     cli% startwsapi
+
+HPE 3PAR shared file system driver configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Install the python-3parclient python package on the OpenStack Block Storage
+  system:
+
+  .. 
code-block:: console + + $ pip install 'python-3parclient>=4.0,<5.0' + +- Manila configuration file + + The Manila configuration file (typically ``/etc/manila/manila.conf``) + defines and configures the Manila drivers and backends. After updating the + configuration file, the Manila share service must be restarted for changes + to take effect. + +- Enable share protocols + + To enable share protocols, an optional list of supported protocols can be + specified using the ``enabled_share_protocols`` setting in the ``DEFAULT`` + section of the ``manila.conf`` file. The default is ``NFS, CIFS`` which + allows both protocols supported by HPE 3PAR (NFS and SMB). Where Manila + uses the term ``CIFS``, HPE 3PAR uses the term ``SMB``. Use the + ``enabled_share_protocols`` option if you want to only provide one type of + share (for example, only NFS) or if you want to explicitly avoid the + introduction of other protocols that can be added for other drivers in the + future. + +- Enable share back ends + + In the ``[DEFAULT]`` section of the Manila configuration file, use the + ``enabled_share_backends`` option to specify the name of one or more + back-end configuration sections to be enabled. To enable multiple + back ends, use a comma-separated list. + + .. note:: + + The name of the backend's configuration section is used (which may + be different from the ``share_backend_name`` value) + +- Configure each back end + + For each back end, a configuration section defines the driver and back end + options. These include common Manila options, as well as driver-specific + options. The following ``Driver options`` section describes + the parameters that need to be configured in the Manila + configuration file for the HPE 3PAR driver. + +Driver options +~~~~~~~~~~~~~~ + +The following table contains the configuration options specific to the +share driver. + +.. 
include:: ../../tables/manila-hpe3par.inc + + +HPE 3PAR Manila driver configuration example +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The following parameters shows a sample subset of the ``manila.conf`` file, +which configures two backends and the relevant ``[DEFAULT]`` options. A real +configuration would include additional ``[DEFAULT]`` options and additional +sections that are not discussed in this document. In this example, the +backends are using different FPGs on the same array: + +.. code-block:: ini + + [DEFAULT] + enabled_share_backends = HPE1,HPE2 + enabled_share_protocols = NFS,CIFS + default_share_type = default + [HPE1] + share_backend_name = HPE3PAR1 + share_driver = manila.share.drivers.hpe.hpe_3par_driver.HPE3ParShareDriver + driver_handles_share_servers = False + max_over_subscription_ratio = 1 + hpe3par_fpg = examplefpg,10.10.10.101 + hpe3par_san_ip = 10.20.30.40 + hpe3par_api_url = https://10.20.30.40:8080/api/v1 + hpe3par_username = + hpe3par_password = + hpe3par_san_login = + hpe3par_san_password = + hpe3par_debug = False + hpe3par_cifs_admin_access_username = + hpe3par_cifs_admin_access_password = + [HPE2] + share_backend_name = HPE3PAR2 + share_driver = manila.share.drivers.hpe.hpe_3par_driver.HPE3ParShareDriver + driver_handles_share_servers = False + max_over_subscription_ratio = 1 + hpe3par_fpg = examplefpg2,10.10.10.102 + hpe3par_san_ip = 10.20.30.40 + hpe3par_api_url = https://10.20.30.40:8080/api/v1 + hpe3par_username = + hpe3par_password = + hpe3par_san_login = + hpe3par_san_password = + hpe3par_debug = False + hpe3par_cifs_admin_access_username = + hpe3par_cifs_admin_access_password = + + +Network approach +~~~~~~~~~~~~~~~~ + +Network connectivity between the storage array (SSH/CLI and WSAPI) and the +Manila host is required for share management. Network connectivity between +the clients and the VFS is required for mounting and using the shares. +This includes: + +- Routing from the client to the external network. 
+ +- Assigning the client an external IP address, for example a floating IP. + +- Configuring the Shared File Systems service host networking properly + for IP forwarding. + +- Configuring the VFS networking properly for client subnets. + +- Configuring network segmentation, if applicable. + +In the OpenStack Kilo release, the HPE 3PAR driver did not support share +networks. Share access from clients to HPE 3PAR shares required external +network access (external to OpenStack) and was set up and configured outside +of Manila. + +In the OpenStack Liberty release, the HPE 3PAR driver could run with or +without share networks. The configuration option +``driver_handles_share_servers``( ``True`` or ``False`` ) indicated whether +share networks could be used. When set to ``False``, the HPE 3PAR driver +behaved as described earlier for Kilo. When set to ``True``, the share +network’s subnet, segmentation ID and IP address range were used to allocate +an FSIP on the HPE 3PAR. There is a limit of four FSIPs per VFS. For clients +to communicate with shares via this FSIP, the client must have access to the +external network using the subnet and segmentation ID of the share network. + +For example, the client must be routed to the neutron provider network with +external access. The Manila host networking configuration and network +switches must support the subnet routing. If the VLAN segmentation ID is used, +communication with the share will use the FSIP IP address. Neutron networking +is required for HPE 3PAR share network support. Flat and VLAN provider +networks are supported, but the HPE 3PAR driver does not support share network +security groups. + +Share access +~~~~~~~~~~~~ +A share that is mounted before access is allowed can appear to be an empty +read-only share. After granting access, the share must be remounted. + +- IP access rules are required for NFS. + +- SMB shares require user access rules. 
+ +With the proper access rules, share access is not limited to the OpenStack +environment. Access rules added via Manila or directly in HPE 3PAR CLI can be +used to allow access to clients outside of the stack. The HPE 3PAR VFS/FSIP +settings determine the subnets visible for HPE 3PAR share access. + +- IP access rules + + To allow IP access to a share in the horizon UI, find the share in the + Project|Manage Compute|Shares view. Use the ``Manage Rules`` action to add + a rule. Select IP as the access type, and enter the external IP address + (for example, the floating IP) of the client in the ``Access to`` box. + + You can also use the command line to allow IP access to a share in the + horizon UI with the command: + + .. code-block:: console + + $ manila access-allow ip + +- User access rules + + To allow user access to a share in the horizon UI, find the share in the + Project|Manage Compute|Shares view. Use the ``Manage Rules`` action to add + a rule. Select user as the access type and enter user name in the + ``Access to`` box. + + You can also use the command line to allow user access to a share in the + horizon UI with the command: + + .. code-block:: console + + $ manila access-allow user + + The user name must be an HPE 3PAR user. + + Share access is different from file system permissions, + for example, ACLs on files and folders. If a user wants to read a file, + the user must have at least read permissions on the share and an ACL that + grants him read permissions on the file or folder. Even with + full control share access, it does not mean every user can do + everything due to the additional restrictions of the folder ACLs. + + To modify the file or folder ACLs, allow access to an HPE 3PAR File Persona + local user that is in the administrator's group and connect to the share + using that user's credentials. Then, use the appropriate mechanism to + modify the ACL or permissions to allow different access than what is + provided by default. + +.. 
_Share types:
+
+Share types
+~~~~~~~~~~~
+
+When creating a share, a share type can be specified to determine where and
+how the share will be created. If a share type is not specified, the
+``default_share_type`` set in the Shared File Systems service configuration
+file is used.
+
+Manila share types are a type or label that can be selected at share creation
+time in OpenStack. These types can be created either in the ``Admin`` horizon
+UI or using the command line, as follows:
+
+.. code-block:: console
+
+   $ manila --os-username admin --os-tenant-name demo type-create
+     --is_public false <name> false
+
+The ``<name>`` is the name of the new share type. False at the end specifies
+``driver_handles_share_servers=False``. The ``driver_handles_share_servers``
+setting in the share type needs to match the setting configured for the
+back end in the ``manila.conf`` file.
+
+``is_public`` is used to indicate whether this share type is applicable to all
+tenants or will be assigned to specific tenants.
+
+``--os-username admin --os-tenant-name demo`` are only needed if your
+environment variables do not specify the desired user and tenant.
+
+For share types that are not public, use Manila ``type-access-add`` to assign
+the share type to a tenant.
+
+- Using share types to require share networks
+
+  The Shared File Systems service requires that the share type include the
+  ``driver_handles_share_servers`` extra-spec. This ensures that the share is
+  created on a back end that supports the requested
+  ``driver_handles_share_servers`` (share networks) capability. From the
+  Liberty release forward, both ``True`` and ``False`` are supported.
+
+  The ``driver_handles_share_servers`` setting in the share type must match
+  the setting in the back end configuration.
+ +- Using share types to select backends by name + + Administrators can optionally specify that a particular share type be + explicitly associated with a single back end (or group of backends) by + including the extra spec share_backend_name to match the name specified + within the ``share_backend_name`` option in the back end configuration. + + When a share type is not selected during share creation, the default share + type is used. To prevent creating these shares on any back end, the default + share type needs to be specific enough to find appropriate default backends + (or to find none if the default should not be used). The following example + shows how to set share_backend_name for a share type. + + .. code-block:: console + + $ manila --os-username admin --os-tenant-name demo type-key + set share_backend_name=HPE3PAR2 + +- Using share types to select backends with capabilities + + The HPE 3PAR driver automatically reports capabilities based on the FPG + used for each back end. An administrator can create share types with extra + specs, which controls share types that can use FPGs with or without + specific capabilities. + + With the OpenStack Liberty release or later, below section shows the extra + specs used with the capabilities filter and the HPE 3PAR driver: + + ``hpe3par_flash_cache`` + When the value is set to `` True`` (or `` False``), shares of + this type are only created on a back end that uses HPE 3PAR Adaptive + Flash Cache. For Adaptive Flash Cache, the HPE 3PAR StoreServ Storage + array must meet the following requirements: + + - Adaptive Flash Cache license installed + - Available SSDs + - Adaptive Flash Cache must be enabled on the HPE 3PAR StoreServ + Storage array. This is done with the following CLI command: + + .. code-block:: console + + cli% createflashcache + + ```` must be in 16 GB increments. For example, the below command + creates 128 GB of Flash Cache for each node pair in the array. + + .. 
code-block:: console
+
+         cli% createflashcache 128g
+
+    - Adaptive Flash Cache must be enabled for the VV set used by an FPG.
+      For example, ``setflashcache vvset:<fpgname>``. The VV set name is the
+      same as the FPG name.
+
+      .. note::
+
+         This setting affects all shares in that FPG (on that back end).
+
+  ``dedupe``
+    When the value is set to ``True`` (or ``False``), shares of
+    this type are only created on a back end that uses deduplication. For HPE
+    3PAR File Persona, the provisioning type is determined when the FPG is
+    created. Using the ``createfpg -tdvv`` option creates an FPG that
+    supports both dedupe and thin provisioning. A thin deduplication license
+    must be installed to use the tdvv option.
+
+  ``thin_provisioning``
+    When the value is set to ``True`` (or ``False``), shares of
+    this type are only created on a back end that uses thin (or full)
+    provisioning. For HPE 3PAR File Persona, the provisioning type is
+    determined when the FPG is created. By default, FPGs are created with
+    thin provisioning. The capacity filter uses the total provisioned space
+    and configured ``max_over_subscription_ratio`` when filtering and weighing
+    backends that use thin provisioning.
+
+
+- Using share types to influence share creation options
+
+  Scoped extra-specs are used to influence vendor-specific implementation
+  details. Scoped extra-specs use a prefix followed by a colon. For HPE 3PAR,
+  these extra specs have a prefix of hpe3par.
+
+  The following HPE 3PAR extra-specs are used when creating CIFS (SMB)
+  shares:
+
+  ``hpe3par:smb_access_based_enum``
+    ``smb_access_based_enum`` (Access Based Enumeration) specifies if users
+    can see only the files and directories to which they have been allowed
+    access on the shares. Valid values are ``True`` or ``False``.
+    The default is ``False``.
+ + ``hpe3par:smb_continuous_avail`` + ``smb_continuous_avail`` (Continuous Availability) specifies if + continuous availability features of SMB3 should be enabled for this + share. Valid values are ``True`` or ``False``. The default is ``True``. + + ``hpe3par:smb_cache`` + ``smb_cache`` specifies client-side caching for offline files. The + default value is ``manual``. Valid values are: + + - ``off`` — the client must not cache any files from this share. The + share is configured to disallow caching. + + - ``manual`` — the client must allow only manual caching for the files + open from this share. + + - ``optimized`` — the client may cache every file that it opens from + this share. Also, the client may satisfy the file requests from its + local cache. The share is configured to allow automatic caching of + programs and documents. + + - ``auto`` — the client may cache every file that it opens from this + share. The share is configured to allow automatic caching of + documents. + + When creating NFS shares, the following HPE 3PAR extra-specs are used: + + ``hpe3par:nfs_options`` + Comma separated list of NFS export options. + + The NFS export options have the following limitations: + + ``ro`` and ``rw`` are not allowed (will be determined by the driver) + + ``no_subtree_check`` and ``fsid`` are not allowed per HPE 3PAR CLI + support + + ``(in)secure`` and ``(no_)root_squash`` are not allowed because the HPE + 3PAR driver controls those settings + + All other NFS options are forwarded to the HPE 3PAR as part of share + creation. The HPE 3PAR performs additional validation at share creation + time. For details, see the HPE 3PAR CLI help. + + +Implementation characteristics +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Shares from snapshots + + + - When a share is created from a snapshot, the share must be deleted + before the snapshot can be deleted. This is enforced by the driver. 
+ + - A snapshot of an empty share will appear to work correctly, but + attempting to create a share from an empty share snapshot may fail with + an ``NFS Create export`` error. + + - HPE 3PAR File Persona snapshots are for an entire File Store. In Manila, + they appear as snapshots of shares. A share sub-directory is used to + give the appearance of a share snapshot when using ``create share from + snapshot`` . + +- Snapshots + + - For HPE 3PAR File Persona, snapshots are per File Store and not per + share. So, the HPE 3PAR limit of 1024 snapshots per File Store results + in a Manila limit of 1024 snapshots per tenant on each back end FPG. + + - Before deleting a share, you must delete its snapshots. This is enforced + by Manila. For HPE 3PAR File Persona, this also kicks off a snapshot + reclamation. + +- Size enforcement + + Manila users create shares with size limits. HPE 3PAR enforces size limits + by using File Store quotas. When using ``hpe3par_fstore_per_share``= + ``True``(the non-default setting) there is only one share per File Store, + so the size enforcement acts as expected. When using + ``hpe3par_fstore_per_share`` = ``False`` (the default), the HPE 3PAR Manila + driver uses one File Store for multiple shares. In this case, the size of + the File Store limit is set to the cumulative limit of its Manila share + sizes. This can allow one tenant share to exceed the limit and affect the + space available for the same tenant’s other shares. One tenant cannot use + another tenant’s File Store. + + +- File removal + + When shares are removed and the ``hpe3par_fstore_per_share``=``False`` + setting is used (the default), files may be left behind in the File Store. + Prior to Mitaka, removal of obsolete share directories and files that have + been stranded would require tools outside of OpenStack/Manila. In Mitaka + and later, the driver mounts the File Store to remove the deleted share’s + subdirectory and files. 
For SMB/CIFS share, it requires the + ``hpe3par_cifs_admin_access_username`` and + ``hpe3par_cifs_admin_access_password`` configuration. If the mount and + delete cannot be performed, an error is logged and the share is deleted + in Manila. Due to the potential space held by leftover files, File Store + quotas are not reduced when shares are removed. + + +- Multi-tenancy + + - Network + + The ``driver_handles_share_servers`` configuration setting determines + whether share networks are supported. When + ``driver_handles_share_servers`` is set to ``True``, a share network is + required to create a share. The administrator creates share networks + with the desired network, subnet, IP range, and segmentation ID. The HPE + 3PAR is configured with an FSIP using the same subnet and + segmentation ID and an IP address allocated from the neutron network. + Using share network-specific IP addresses, subnets, and segmentation IDs + give the appearance of better tenant isolation. Shares on an FPG, + however, are accessible via any of the FSIPs (subject to access rules). + Back end filtering should be used for further separation. + + - Back end filtering + + A Manila HPE 3PAR back end configuration refers to a specific array and + a specific FPG. With multiple backends and multiple tenants, the + scheduler determines where shares will be created. In a scenario where + an array or back end needs to be restricted to one or more specific + tenants, share types can be used to influence the selection of a + back end. For more information on using share types, + see `Share types`_ . + + - Tenant limit + + The HPE 3PAR driver uses one File Store per tenant per protocol in each + configured FPG. When only one back end is configured, this results in a + limit of eight tenants (16 if only using one protocol). Use multiple + back end configurations to introduce additional FPGs on the same array + to increase the tenant limit. 
+ + When using share networks, an FSIP is created for each share network + (when its first share is created on the back end). The HPE 3PAR supports + 4 FSIPs per FPG (VFS). One of those 4 FSIPs is reserved for the initial + VFS IP, so the share network limit is 48 share networks per node pair. diff --git a/doc/source/configuration/shared-file-systems/drivers/huawei-nas-driver.rst b/doc/source/configuration/shared-file-systems/drivers/huawei-nas-driver.rst new file mode 100644 index 0000000000..4e55d351a6 --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/huawei-nas-driver.rst @@ -0,0 +1,132 @@ +============= +Huawei driver +============= + +Huawei NAS driver is a plug-in based on the Shared File Systems service. +The Huawei NAS driver can be used to provide functions such as the share +and snapshot for virtual machines, or instances, in OpenStack. Huawei +NAS driver enables the OceanStor V3 series V300R002 storage system to +provide only network filesystems for OpenStack. + +Requirements +~~~~~~~~~~~~ + +- The OceanStor V3 series V300R002 storage system. + +- The following licenses should be activated on V3 for File: CIFS, NFS, + HyperSnap License (for snapshot). + +Supported shared filesystems and operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports CIFS and NFS shares. + +The following operations are supported: + +- Create a share. + +- Delete a share. + +- Allow share access. + + Note the following limitations: + + - Only IP access type is supported for NFS. + + - Only user access is supported for CIFS. + +- Deny share access. + +- Create a snapshot. + +- Delete a snapshot. + +- Support pools in one backend. + +- Extend a share. + +- Shrink a share. + +- Create a replica. + +- Delete a replica. + +- Promote a replica. + +- Update a replica state. + +Pre-configurations on Huawei +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +#. Create a driver configuration file. 
The driver configuration file + name must be the same as the ``manila_huawei_conf_file`` item in the + ``manila_conf`` configuration file. + +#. Configure the product. Product indicates the storage system type. + For the OceanStor V3 series V300R002 storage systems, the driver + configuration file is as follows: + + .. code-block:: xml + + + + + V3 + x.x.x.x + https://x.x.x.x:8088/deviceManager/rest/ + xxxxxxxxx + xxxxxxxxx + + + xxxxxxxxx + xxxxxxxxx + 3 + 60 + + + + The options are: + + - ``Product`` is a type of storage product. Set it to ``V3``. + + - ``LogicalPortIP`` is the IP address of the logical port. + + - ``RestURL`` is an access address of the REST interface. + Multiple RestURLs can be configured in ````, + separated by ";". The driver will automatically retry another + ``RestURL`` if one fails to connect. + + - ``UserName`` is the user name of an administrator. + + - ``UserPassword`` is the password of an administrator. + + - ``Thin_StoragePool`` is the name of a thin storage pool to be used. + + - ``Thick_StoragePool`` is the name of a thick storage pool to be used. + + - ``WaitInterval`` is the interval time of querying the file + system status. + + - ``Timeout`` is the timeout period for waiting command + execution of a device to complete. + +Back end configuration +~~~~~~~~~~~~~~~~~~~~~~ + +Modify the ``manila.conf`` Shared File Systems service configuration +file and add ``share_driver`` and ``manila_huawei_conf_file`` items. +Here is an example for configuring a storage system: + +.. code-block:: ini + + share_driver = manila.share.drivers.huawei.huawei_nas.HuaweiNasDriver + manila_huawei_conf_file = /etc/manila/manila_huawei_conf.xml + driver_handles_share_servers = False + +Driver options +~~~~~~~~~~~~~~ + +The following table contains the configuration options specific to the +share driver. + +.. 
include:: ../../tables/manila-huawei.inc diff --git a/doc/source/configuration/shared-file-systems/drivers/ibm-spectrumscale-driver.rst b/doc/source/configuration/shared-file-systems/drivers/ibm-spectrumscale-driver.rst new file mode 100644 index 0000000000..9e2460b4bf --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/ibm-spectrumscale-driver.rst @@ -0,0 +1,177 @@ +=============================== +IBM Spectrum Scale share driver +=============================== + +IBM Spectrum Scale is a flexible software-defined storage product that can be +deployed as high-performance file storage or a cost optimized +large-scale content repository. IBM Spectrum Scale, previously known as +IBM General Parallel File System (GPFS), is designed to scale performance +and capacity with no bottlenecks. IBM Spectrum Scale is a cluster file system +that provides concurrent access to file systems from multiple nodes. The +storage provided by these nodes can be direct attached, network attached, SAN +attached, or a combination of these methods. Spectrum Scale provides many +features beyond common data access, including data replication, policy based +storage management, and space efficient file snapshot and clone operations. + +Supported shared filesystems and operations (NFS shares only) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The Spectrum Scale share driver supports NFS shares. + +The following operations are supported: + +- Create a share. + +- Delete a share. + +- Allow share access. + + - Only IP access type is supported. + + - Both RW & RO access level is supported. + +- Deny share access. + +- Create a share snapshot. + +- Delete a share snapshot. + +- Create a share from a snapshot. + +- Extend a share. + +- Manage a share. + +- Unmanage a share. + +Requirements +~~~~~~~~~~~~ + +Spectrum Scale must be installed and a cluster must be created that includes +one or more storage nodes and protocol server nodes. 
The NFS server +running on these nodes is used to export shares to storage consumers in +OpenStack virtual machines or even to bare metal storage consumers in the +OpenStack environment. A file system must also be created and +mounted on these nodes before configuring the manila service to use Spectrum +Scale storage. For more details, refer to `Spectrum Scale product +documentation `_. + +Spectrum Scale supports two ways of exporting data through NFS with high +availability. + +#. CES (which uses Ganesha NFS) + + * This is provided inherently by the protocol support in Spectrum Scale + and is a recommended method for NFS access. + +#. CNFS (which uses kernel NFS) + +For more information on NFS support in Spectrum Scale, refer to +`Protocol support in Spectrum Scale `_ and +`NFS Support overview in Spectrum Scale `_. + +The following figure is an example of Spectrum Scale architecture +with OpenStack services: + +.. figure:: ../../figures/openstack-spectrumscale-setup.JPG + :width: 90% + :align: center + :alt: OpenStack with Spectrum Scale Setup + +Quotas should be enabled for the Spectrum Scale filesystem to be exported +through NFS using Spectrum Scale share driver. +Use the following command to enable quota for a filesystem: + +.. code-block:: console + + $ mmchfs -Q yes + +Limitation +~~~~~~~~~~ + +Spectrum Scale share driver currently supports creation of NFS shares in the +flat network space only. For example, the Spectrum Scale storage node exporting +the data should be in the same network as that of the Compute VMs which mount +the shares acting as NFS clients. + +Driver configuration +~~~~~~~~~~~~~~~~~~~~ + +Spectrum Scale share driver supports creation of shares using both NFS servers +(Ganesha using Spectrum Scale CES/Kernel NFS). + +For both the NFS server types, you need to set the ``share_driver`` in the +``manila.conf`` as: + +.. 
code-block:: ini + + share_driver = manila.share.drivers.ibm.gpfs.GPFSShareDriver + +Spectrum Scale CES (NFS Ganesha server) +--------------------------------------- + +To use Spectrum Scale share driver in this mode, set the ``gpfs_share_helpers`` +in the ``manila.conf`` as: + +.. code-block:: ini + + gpfs_share_helpers = CES=manila.share.drivers.ibm.gpfs.CESHelper + +Following table lists the additional configuration options which are used with +this driver configuration. + +.. include:: ../../tables/manila-spectrumscale_ces.inc + +.. note:: + + Configuration options related to ssh are required only if ``is_gpfs_node`` + is set to ``False``. + +Spectrum Scale Clustered NFS (Kernel NFS server) +------------------------------------------------ + +To use Spectrum Scale share driver in this mode, set the ``gpfs_share_helpers`` +in the ``manila.conf`` as: + +.. code-block:: ini + + gpfs_share_helpers = KNFS=manila.share.drivers.ibm.gpfs.KNFSHelper + +Following table lists the additional configuration options which are used with +this driver configuration. + +.. include:: ../../tables/manila-spectrumscale_knfs.inc + +.. note:: + + Configuration options related to ssh are required only if ``is_gpfs_node`` + is set to ``False``. + +Share creation steps +~~~~~~~~~~~~~~~~~~~~ + +Sample configuration +-------------------- + +.. code-block:: ini + + [gpfs] + share_driver = manila.share.drivers.ibm.gpfs.GPFSShareDriver + gpfs_share_export_ip = x.x.x.x + gpfs_mount_point_base = /ibm/gpfs0 + gpfs_nfs_server_type = CES + is_gpfs_node = True + gpfs_share_helpers = CES=manila.share.drivers.ibm.gpfs.CESHelper + share_backend_name = GPFS + driver_handles_share_servers = False + + +Create GPFS share type and set extra spec +----------------------------------------- + +.. 
code-block:: console + + $ manila type-create --snapshot_support True \ + --create_share_from_snapshot_support True gpfs False + + $ manila type-key gpfs set share_backend_name=GPFS diff --git a/doc/source/configuration/shared-file-systems/drivers/lvm-driver.rst b/doc/source/configuration/shared-file-systems/drivers/lvm-driver.rst new file mode 100644 index 0000000000..1652a6b4a9 --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/lvm-driver.rst @@ -0,0 +1,89 @@ +================ +LVM share driver +================ + +The Shared File Systems service can be configured to use LVM share +driver. LVM share driver relies solely on LVM running on the same host with +manila-share service. It does not require any services not +related to the Shared File Systems service to be present to work. + +Prerequisites +~~~~~~~~~~~~~ + +The following packages must be installed on the same host with manila-share +service: + +- NFS server + +- Samba server >= 3.2.0 + +- LVM2 >= 2.02.66 + +Services must be up and running, ports used by the services must not be +blocked. A node with manila-share service should be accessible to share +service users. + +LVM should be preconfigured. By default, LVM driver expects to find a volume +group named ``lvm-shares``. This volume group will be used by the driver for +share provisioning. It should be managed by node administrator separately. + +Shared File Systems service driver configuration setting +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To use the driver, one should set up a corresponding back end. A driver +must be explicitly specified as well as export IP address. A +minimal back-end specification that will enable LVM share driver is presented +below: + +.. 
code-block:: ini + + [LVM_sample_backend] + driver_handles_share_servers = False + share_driver = manila.share.drivers.lvm.LVMShareDriver + lvm_share_export_ip = 1.2.3.4 + +In the example above, ``lvm_share_export_ip`` is the address to be used by +clients for accessing shares. In the simplest case, it should be the same +as host's address. + +Supported shared file systems and operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports CIFS and NFS shares. + +The following operations are supported: + +- Create a share. + +- Delete a share. + +- Allow share access. + + Note the following limitations: + + - Only IP access type is supported for NFS. + +- Deny share access. + +- Create a snapshot. + +- Delete a snapshot. + +- Create a share from a snapshot. + +- Extend a share. + +Known restrictions +~~~~~~~~~~~~~~~~~~ + +- LVM driver should not be used on a host running Neutron agents, simultaneous + usage might cause issues with share deletion (shares will not get deleted + from volume groups). + +Driver options +~~~~~~~~~~~~~~ + +The following table contains the configuration options specific to this +driver. + +.. include:: ../../tables/manila-lvm.inc diff --git a/doc/source/configuration/shared-file-systems/drivers/maprfs-native-driver.rst b/doc/source/configuration/shared-file-systems/drivers/maprfs-native-driver.rst new file mode 100644 index 0000000000..b81a2a2448 --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/maprfs-native-driver.rst @@ -0,0 +1,137 @@ +==================== +MapRFS native driver +==================== + +MapR-FS native driver is a plug-in based on the Shared File Systems service +and provides high-throughput access to the data on MapR-FS distributed file +system, which is designed to hold very large amounts of data. + +A Shared File Systems service share in this driver is a volume in MapR-FS. +Instances talk directly to the MapR-FS storage backend via the (mapr-posix) +client. 
To mount a MapR-FS volume, the MapR POSIX client is required. +Access to each share is allowed by user and group based access type, which is +aligned with MapR-FS ACEs to support access control for multiple users and +groups. If user name and group name are the same, the group access type will +be used by default. + +For more details, see `MapR documentation `_. + +Network configuration +~~~~~~~~~~~~~~~~~~~~~ + +The storage backend and Shared File Systems service hosts should be in a flat +network. Otherwise, the L3 connectivity between them should exist. + +Supported shared filesystems and operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports MapR-FS shares. + +The following operations are supported: + +- Create MapR-FS share. +- Delete MapR-FS share. +- Allow MapR-FS Share access. + + - Only support user and group access type. + - Support level of access (ro/rw). + +- Deny MapR-FS Share access. +- Update MapR-FS Share access. +- Create snapshot. +- Delete snapshot. +- Create share from snapshot. +- Extend share. +- Shrink share. +- Manage share. +- Unmanage share. +- Manage snapshot. +- Unmanage snapshot. +- Ensure share. + +Requirements +~~~~~~~~~~~~ + +- Install MapR core packages, version >= 5.2.x, on the storage backend. + +- To enable snapshots, the MapR cluster should have at least M5 license. + +- Establish network connection between the Shared File Systems service hosts + and storage backend. + +- Obtain a `ticket `_ + for user who will be used to access MapR-FS. + +Back end configuration (manila.conf) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Add MapR-FS protocol to ``enabled_share_protocols``: + +.. code-block:: ini + + enabled_share_protocols = MAPRFS + +Create a section for MapR-FS backend. Example: + +.. 
code-block:: ini + + [maprfs] + driver_handles_share_servers = False + share_driver = + manila.share.drivers.maprfs.maprfs_native.MapRFSNativeShareDriver + maprfs_clinode_ip = example + maprfs_ssh_name = mapr + maprfs_ssh_pw = mapr + share_backend_name = maprfs + +Set ``driver-handles-share-servers`` to ``False`` as the driver does not +manage the lifecycle of ``share-servers``. + +Add driver backend to ``enabled_share_backends``: + +.. code-block:: ini + + enabled_share_backends = maprfs + +Driver options +~~~~~~~~~~~~~~ + +The following table contains the configuration options specific to this +driver. + +.. include:: ../../tables/manila-maprfs.inc + +Known restrictions +~~~~~~~~~~~~~~~~~~ + +This driver does not handle user authentication, no tickets or users are +created by this driver. This means that when 'access_allow' or +'update_access' is called, this will have no effect without providing +tickets to users. + + +Share metadata +~~~~~~~~~~~~~~ + +MapR-FS shares can be created by specifying additional options. Metadata is +used for this purpose. Every metadata option with ``_`` prefix is passed to +MapR-FS volume. For example, to specify advisory volume quota add +``_advisoryquota=10G`` option to metadata: + +.. code-block:: console + + $ manila create MAPRFS 1 --metadata _advisoryquota=10G + +If you need to create a share with your custom backend name or export location +instead of uuid, you can specify ``_name`` and ``_path`` options: + +.. code-block:: console + + $ manila create MAPRFS 1 --metadata _name=example _path=/example + +.. WARNING:: + Specifying invalid options will cause an error. + +The list of allowed options depends on mapr-core version. +See `volume create `_ +for more information. 
diff --git a/doc/source/configuration/shared-file-systems/drivers/netapp-cluster-mode-driver.rst b/doc/source/configuration/shared-file-systems/drivers/netapp-cluster-mode-driver.rst new file mode 100644 index 0000000000..8bc73a0dd2 --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/netapp-cluster-mode-driver.rst @@ -0,0 +1,87 @@ +================================== +NetApp Clustered Data ONTAP driver +================================== + +The Shared File Systems service can be configured to use NetApp +clustered Data ONTAP version 8. + +Network approach +~~~~~~~~~~~~~~~~ + +L3 connectivity between the storage cluster and Shared File Systems +service host should exist, and VLAN segmentation should be configured. + +The clustered Data ONTAP driver creates storage virtual machines (SVM, +previously known as vServers) as representations of the Shared File +Systems service share server interface, configures logical interfaces +(LIFs) and stores shares there. + +Supported shared filesystems and operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports CIFS and NFS shares. + +The following operations are supported: + +- Create a share. + +- Delete a share. + +- Allow share access. + + Note the following limitations: + + - Only IP access type is supported for NFS. + + - Only user access type is supported for CIFS. + +- Deny share access. + +- Create a snapshot. + +- Delete a snapshot. + +- Create a share from a snapshot. + +- Extend a share. + +- Shrink a share. + +- Create a consistency group. + +- Delete a consistency group. + +- Create a consistency group snapshot. + +- Delete a consistency group snapshot. + +Required licenses +~~~~~~~~~~~~~~~~~ + +- NFS + +- CIFS + +- FlexClone + +Known restrictions +~~~~~~~~~~~~~~~~~~ + +- For CIFS shares an external active directory service is required. Its + data should be provided via security-service that is attached to used + share-network. 
+ +- Share access rule by user for CIFS shares can be created only for + existing user in active directory. + +- To be able to configure clients to security services, the time on + these external security services and storage should be synchronized. + The maximum allowed clock skew is 5 minutes. + +Driver options +~~~~~~~~~~~~~~ + +The following table contains the configuration options specific to the +share driver. + +.. include:: ../../tables/manila-netapp.inc diff --git a/doc/source/configuration/shared-file-systems/drivers/quobyte-driver.rst b/doc/source/configuration/shared-file-systems/drivers/quobyte-driver.rst new file mode 100644 index 0000000000..e136ca2784 --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/quobyte-driver.rst @@ -0,0 +1,79 @@ +============== +Quobyte Driver +============== + +Quobyte can be used as a storage back end for the OpenStack Shared File +System service. Shares in the Shared File System service are mapped 1:1 +to Quobyte volumes. Access is provided via NFS protocol and IP-based +authentication. The Quobyte driver uses the Quobyte API service. + +Supported shared filesystems and operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports NFS shares. + +The following operations are supported: + +- Create a share. + +- Delete a share. + +- Allow share access. + + Note the following limitations: + + - Only IP access type is supported. + +- Deny share access. + + +Driver options +~~~~~~~~~~~~~~ + +The following table contains the configuration options specific to the +share driver. + +.. include:: ../../tables/manila-quobyte.inc + +Configuration +~~~~~~~~~~~~~~ + +To configure Quobyte access for the Shared File System service, a back end +configuration section has to be added in the ``manila.conf`` file. Add the +name of the configuration section to ``enabled_share_backends`` in the +``manila.conf`` file. For example, if the section is named ``Quobyte``: + +.. 
code-block:: ini + + enabled_share_backends = Quobyte + +Create the new back end configuration section, in this case named +``Quobyte``: + +.. code-block:: ini + + [Quobyte] + + share_driver = manila.share.drivers.quobyte.quobyte.QuobyteShareDriver + share_backend_name = QUOBYTE + quobyte_api_url = http://api.myserver.com:1234/ + quobyte_delete_shares = False + quobyte_volume_configuration = BASE + quobyte_default_volume_user = myuser + quobyte_default_volume_group = mygroup + +The section name must match the name used in the +``enabled_share_backends`` option described above. +The ``share_driver`` setting is required as shown, the +other options should be set according to your local Quobyte setup. + +Other security-related options are: + +.. code-block:: ini + + quobyte_api_ca = /path/to/API/server/verification/certificate + quobyte_api_username = api_user + quobyte_api_password = api_user_pwd + +Quobyte support can be found at the `Quobyte support webpage +`_. diff --git a/doc/source/configuration/shared-file-systems/drivers/zfs-on-linux-driver.rst b/doc/source/configuration/shared-file-systems/drivers/zfs-on-linux-driver.rst new file mode 100644 index 0000000000..5d7bc33084 --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/zfs-on-linux-driver.rst @@ -0,0 +1,190 @@ +===================== +ZFS (on Linux) driver +===================== + +Manila ZFSonLinux share driver uses ZFS file system for exporting NFS shares. +Written and tested using Linux version of ZFS. + +Requirements +~~~~~~~~~~~~ + +- NFS daemon that can be handled through ``exportfs`` app. + +- ZFS file system packages, either Kernel or FUSE versions. + +- ZFS zpools that are going to be used by Manila should exist and be + configured as desired. Manila will not change zpool configuration. + +- For remote ZFS hosts according to manila-share service host SSH should be + installed. + +- For ZFS hosts that support replication: + + - SSH access for each other should be passwordless. 
+ + - Service IP addresses should be available by ZFS hosts for each other. + +Supported shared filesystems and operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports NFS shares. + +The following operations are supported: + +- Create a share. + +- Delete a share. + +- Allow share access. + + - Only IP access type is supported. + + - Both access levels are supported - ``RW`` and ``RO``. + +- Deny share access. + +- Create a snapshot. + +- Delete a snapshot. + +- Create a share from snapshot. + +- Extend a share. + +- Shrink a share. + +- Share replication (experimental): + + - Create, update, delete, and promote replica operations are supported. + +Possibilities +~~~~~~~~~~~~~ + +- Any amount of ZFS zpools can be used by share driver. + +- Allowed to configure default options for ZFS datasets that are used + for share creation. + +- Any amount of nested datasets is allowed to be used. + +- All share replicas are read-only, only active one is read-write. + +- All share replicas are synchronized periodically, not continuously. + Status ``in_sync`` means latest sync was successful. + Time range between syncs equals to the value + of the ``replica_state_update_interval`` configuration global option. + +- Driver can use qualified extra spec ``zfsonlinux:compression``. + It can contain any value that ZFS app supports. + But if it is disabled through the configuration option + with the value ``compression=off``, then it will not be used. + +Restrictions +~~~~~~~~~~~~ + +The ZFSonLinux share driver has the following restrictions: + +- Only IP access type is supported for NFS. + +- Only FLAT network is supported. + +- ``Promote share replica`` operation will switch roles of + current ``secondary`` replica and ``active``. It does not make more than + one active replica available. + +- The below items are not yet implemented: + + - ``Manage share`` operation. + + - ``Manage snapshot`` operation. + + - ``SaMBa`` based sharing. 
+ + - ``Thick provisioning`` capability. + +Known problems +~~~~~~~~~~~~~~ + +- ``Promote share replica`` operation will make ZFS file system that became + secondary as RO only on NFS level. On ZFS level system will + stay mounted as was - RW. + +Back-end configuration +~~~~~~~~~~~~~~~~~~~~~~ + +The following parameters need to be configured in the manila configuration file +for back-ends that use the ZFSonLinux driver: + +- ``share_driver`` + = manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver + +- ``driver_handles_share_servers`` = False + +- ``replication_domain`` = custom_str_value_as_domain_name + + - If empty, then replication will be disabled. + + - If set, then will be able to be used as replication peer for other + back ends with the same value. + +- ``zfs_share_export_ip`` = + +- ``zfs_service_ip`` = + +- ``zfs_zpool_list`` = zpoolname1,zpoolname2/nested_dataset_for_zpool2 + + - Can be one or more zpools. + + - Can contain nested datasets. + +- ``zfs_dataset_creation_options`` = + + - readonly, quota, sharenfs and sharesmb options will be ignored. + +- ``zfs_dataset_name_prefix`` = + + - Prefix to be used in each dataset name. + +- ``zfs_dataset_snapshot_name_prefix`` = + + - Prefix to be used in each dataset snapshot name. + +- ``zfs_use_ssh`` = + + - Set ``False`` if ZFS is located on the same host as `manila-share` service. + + - Set ``True`` if `manila-share` service should use SSH + for ZFS configuration. + +- ``zfs_ssh_username`` = + + - Required for replication operations. + + - Required for SSH'ing to ZFS host if ``zfs_use_ssh`` is set to ``True``. + +- ``zfs_ssh_user_password`` = + + - Password for ``zfs_ssh_username`` of ZFS host. + + - Used only if ``zfs_use_ssh`` is set to ``True``. + +- ``zfs_ssh_private_key_path`` = + + - Used only if ``zfs_use_ssh`` is set to ``True``. 
+ +- ``zfs_share_helpers`` + = NFS=manila.share.drivers.zfsonlinux.utils.NFSviaZFSHelper + + - Approach for setting up helpers is similar to various other share drivers. + + - At least one helper should be used. + +- ``zfs_replica_snapshot_prefix`` = + + - Prefix to be used in dataset snapshot names that are created + by ``update replica`` operation. + +Driver options +~~~~~~~~~~~~~~ + +.. include:: ../../tables/manila-zfs.inc diff --git a/doc/source/configuration/shared-file-systems/drivers/zfssa-manila-driver.rst b/doc/source/configuration/shared-file-systems/drivers/zfssa-manila-driver.rst new file mode 100644 index 0000000000..c4ff8e8db4 --- /dev/null +++ b/doc/source/configuration/shared-file-systems/drivers/zfssa-manila-driver.rst @@ -0,0 +1,123 @@ +=================================== +Oracle ZFS Storage Appliance driver +=================================== +The Oracle ZFS Storage Appliance driver, version 1.0.0, enables the +Oracle ZFS Storage Appliance (ZFSSA) to be used seamlessly as a shared +storage resource for the OpenStack File System service (manila). The driver +provides the ability to create and manage NFS and CIFS shares +on the appliance, allowing virtual machines to access the shares +simultaneously and securely. + +Requirements +~~~~~~~~~~~~ +Oracle ZFS Storage Appliance Software version 2013.1.2.0 or later. + +Supported operations +~~~~~~~~~~~~~~~~~~~~ + +- Create NFS and CIFS shares. +- Delete NFS and CIFS shares. +- Allow or deny IP access to NFS shares. +- Create snapshots of a share. +- Delete snapshots of a share. +- Create share from snapshot. + +Restrictions +~~~~~~~~~~~~ + +- Access to CIFS shares are open and cannot be changed from manila. +- Version 1.0.0 of the driver only supports Single SVM networking mode. + +Appliance configuration +~~~~~~~~~~~~~~~~~~~~~~~ + +#. Enable RESTful service on the ZFSSA Storage Appliance. + +#. 
Create a new user on the appliance with the following authorizations:: + + scope=stmf - allow_configure=true + scope=nas - allow_clone=true, allow_createProject=true, allow_createShare=true, allow_changeSpaceProps=true, allow_changeGeneralProps=true, allow_destroy=true, allow_rollback=true, allow_takeSnap=true + + You can create a role with authorizations as follows:: + + zfssa:> configuration roles + zfssa:configuration roles> role OpenStackRole + zfssa:configuration roles OpenStackRole (uncommitted)> set description="OpenStack Manila Driver" + zfssa:configuration roles OpenStackRole (uncommitted)> commit + zfssa:configuration roles> select OpenStackRole + zfssa:configuration roles OpenStackRole> authorizations create + zfssa:configuration roles OpenStackRole auth (uncommitted)> set scope=stmf + zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_configure=true + zfssa:configuration roles OpenStackRole auth (uncommitted)> commit + + You can create a user with a specific role as follows:: + + zfssa:> configuration users + zfssa:configuration users> user cinder + zfssa:configuration users cinder (uncommitted)> set fullname="OpenStack Manila Driver" + zfssa:configuration users cinder (uncommitted)> set initial_password=12345 + zfssa:configuration users cinder (uncommitted)> commit + zfssa:configuration users> select cinder set roles=OpenStackRole + +#. Create a storage pool. + + An existing pool can also be used if required. You can create a pool + as follows:: + + zfssa:> configuration storage + zfssa:configuration storage> config pool + zfssa:configuration storage verify> set data=2 + zfssa:configuration storage verify> done + zfssa:configuration storage config> done + +#. Create a new project. + + You can create a project as follows:: + + zfssa:> shares + zfssa:shares> project proj + zfssa:shares proj (uncommitted)> commit + +#. Create a new or use an existing data IP address. 
+ + You can create an interface as follows:: + + zfssa:> configuration net interfaces ip + zfssa:configuration net interfaces ip (uncommitted)> set v4addrs=127.0.0.1/24 + v4addrs = 127.0.0.1/24 (uncommitted) + zfssa:configuration net interfaces ip (uncommitted)> set links=vnic1 + links = vnic1 (uncommitted) + zfssa:configuration net interfaces ip (uncommitted)> set admin=false + admin = false (uncommitted) + zfssa:configuration net interfaces ip (uncommitted)> commit + + It is required that both interfaces used for data and management are + configured properly. The data interface must be different from the + management interface. + +#. Configure the cluster. + + If a cluster is used as the manila storage resource, the following + verifications are required: + + - Verify that both the newly created pool and the network interface are of + type singleton and are not locked to the current controller. + This approach ensures that the pool and the interface used for data + always belong to the active controller, regardless of the current state + of the cluster. + + - Verify that the management IP, data IP and storage pool belong to the + same head. + + .. note:: + + A short service interruption occurs during failback or takeover, + but once the process is complete, manila should be able + to access the pool through the data IP. + +Driver options +~~~~~~~~~~~~~~ + +The Oracle ZFSSA driver supports these options: + +.. 
include:: ../../tables/manila-zfssa.inc diff --git a/doc/source/configuration/shared-file-systems/log-files.rst b/doc/source/configuration/shared-file-systems/log-files.rst new file mode 100644 index 0000000000..6dbed9f8a4 --- /dev/null +++ b/doc/source/configuration/shared-file-systems/log-files.rst @@ -0,0 +1,30 @@ +===================================== +Log files used by Shared File Systems +===================================== + +The corresponding log file of each Shared File Systems service is stored +in the ``/var/log/manila/`` directory of the host on which each service +runs. + +.. list-table:: Log files used by Shared File Systems services + :header-rows: 1 + + * - Log file + - Service/interface (for CentOS, Fedora, openSUSE, Red Hat Enterprise + Linux, and SUSE Linux Enterprise) + - Service/interface (for Ubuntu and Debian) + * - ``api.log`` + - ``openstack-manila-api`` + - ``manila-api`` + * - ``manila-manage.log`` + - ``manila-manage`` + - ``manila-manage`` + * - ``scheduler.log`` + - ``openstack-manila-scheduler`` + - ``manila-scheduler`` + * - ``share.log`` + - ``openstack-manila-share`` + - ``manila-share`` + * - ``data.log`` + - ``openstack-manila-data`` + - ``manila-data`` diff --git a/doc/source/configuration/shared-file-systems/overview.rst b/doc/source/configuration/shared-file-systems/overview.rst new file mode 100644 index 0000000000..3353941e9c --- /dev/null +++ b/doc/source/configuration/shared-file-systems/overview.rst @@ -0,0 +1,92 @@ +=============================================== +Introduction to the Shared File Systems service +=============================================== + +The Shared File Systems service provides shared file systems that +Compute instances can consume. + +The overall Shared File Systems service is implemented via the +following specific services: + +manila-api + A WSGI app that authenticates and routes requests + throughout the Shared File Systems service. It supports the OpenStack + APIs. 
+ +manila-data + A standalone service whose purpose is to receive requests, process data + operations with potentially long running time such as copying, share + migration or backup. + +manila-scheduler + Schedules and routes requests to the appropriate + share service. The scheduler uses configurable filters and weighers + to route requests. The Filter Scheduler is the default and enables + filters on things like Capacity, Availability Zone, Share Types, and + Capabilities as well as custom filters. + +manila-share + Manages back-end devices that provide shared file + systems. A manila-share service can run in one of two modes, with or + without handling of share servers. Share servers export file shares + via share networks. When share servers are not used, the networking + requirements are handled outside of Manila. + +The Shared File Systems service contains the following components: + +**Back-end storage devices** + The Shared File Services service requires some form of back-end shared file + system provider that the service is built on. The reference implementation + uses the Block Storage service (Cinder) and a service VM to provide shares. + Additional drivers are used to access shared file systems from a variety of + vendor solutions. + +**Users and tenants (projects)** + The Shared File Systems service can be used by many different cloud + computing consumers or customers (tenants on a shared system), using + role-based access assignments. Roles control the actions that a user is + allowed to perform. In the default configuration, most actions do not + require a particular role unless they are restricted to administrators, but + this can be configured by the system administrator in the appropriate + ``policy.json`` file that maintains the rules. A user's access to manage + particular shares is limited by tenant. Guest access to mount and use shares + is secured by IP and/or user access rules. 
Quotas used to control resource + consumption across available hardware resources are per tenant. + + For tenants, quota controls are available to limit: + + - The number of shares that can be created. + + - The number of gigabytes that can be provisioned for shares. + + - The number of share snapshots that can be created. + + - The number of gigabytes that can be provisioned for share + snapshots. + + - The number of share networks that can be created. + + You can revise the default quota values with the Shared File Systems + CLI, so the limits placed by quotas are editable by admin users. + +**Shares, snapshots, and share networks** + The basic resources offered by the Shared File Systems service are shares, + snapshots and share networks: + + **Shares** + A share is a unit of storage with a protocol, a size, and an access list. + Shares are the basic primitive provided by Manila. All shares exist on a + backend. Some shares are associated with share networks and share + servers. The main protocols supported are NFS and CIFS, but other + protocols are supported as well. + + **Snapshots** + A snapshot is a point in time copy of a share. Snapshots can only be + used to create new shares (containing the snapshotted data). Shares + cannot be deleted until all associated snapshots are deleted. + + **Share networks** + A share network is a tenant-defined object that informs Manila about the + security and network configuration for a group of shares. Share networks + are only relevant for backends that manage share servers. A share network + contains a security service and network/subnet. 
diff --git a/doc/source/configuration/shared-file-systems/samples/api-paste.ini.rst b/doc/source/configuration/shared-file-systems/samples/api-paste.ini.rst new file mode 100644 index 0000000000..eb31fcbb7a --- /dev/null +++ b/doc/source/configuration/shared-file-systems/samples/api-paste.ini.rst @@ -0,0 +1,9 @@ +============= +api-paste.ini +============= + +The shared file systems service stores its API configuration settings in the +``api-paste.ini`` file. + +.. literalinclude:: ../../../../../etc/manila/api-paste.ini + :language: ini diff --git a/doc/source/configuration/shared-file-systems/samples/index.rst b/doc/source/configuration/shared-file-systems/samples/index.rst new file mode 100644 index 0000000000..6085025447 --- /dev/null +++ b/doc/source/configuration/shared-file-systems/samples/index.rst @@ -0,0 +1,12 @@ +====================================================== +Shared File Systems service sample configuration files +====================================================== + +All the files in this section can be found in ``/etc/manila``. + +.. toctree:: + + manila.conf.rst + api-paste.ini.rst + policy.json.rst + rootwrap.conf.rst diff --git a/doc/source/configuration/shared-file-systems/samples/manila.conf.rst b/doc/source/configuration/shared-file-systems/samples/manila.conf.rst new file mode 100644 index 0000000000..24f8bf2600 --- /dev/null +++ b/doc/source/configuration/shared-file-systems/samples/manila.conf.rst @@ -0,0 +1,13 @@ +=========== +manila.conf +=========== + +The ``manila.conf`` file is installed in ``/etc/manila`` by default. +When you manually install the Shared File Systems service, the options +in the ``manila.conf`` file are set to default values. + +The ``manila.conf`` file contains most of the options needed to configure +the Shared File Systems service. + +.. 
literalinclude:: ../../../_static/manila.conf.sample + :language: ini diff --git a/doc/source/configuration/shared-file-systems/samples/policy.json.rst b/doc/source/configuration/shared-file-systems/samples/policy.json.rst new file mode 100644 index 0000000000..cd2c67738f --- /dev/null +++ b/doc/source/configuration/shared-file-systems/samples/policy.json.rst @@ -0,0 +1,9 @@ +=========== +policy.json +=========== + +The ``policy.json`` file defines additional access controls that apply +to the Shared File Systems service. + +.. literalinclude:: ../../../../../etc/manila/policy.json + :language: json diff --git a/doc/source/configuration/shared-file-systems/samples/rootwrap.conf.rst b/doc/source/configuration/shared-file-systems/samples/rootwrap.conf.rst new file mode 100644 index 0000000000..488be3afcf --- /dev/null +++ b/doc/source/configuration/shared-file-systems/samples/rootwrap.conf.rst @@ -0,0 +1,10 @@ +============= +rootwrap.conf +============= + +The ``rootwrap.conf`` file defines configuration values used by the +``rootwrap`` script when the Shared File Systems service must escalate +its privileges to those of the root user. + +.. literalinclude:: ../../../../../etc/manila/rootwrap.conf + :language: ini diff --git a/doc/source/configuration/tables/manila-api.inc b/doc/source/configuration/tables/manila-api.inc new file mode 100644 index 0000000000..5afecb88b4 --- /dev/null +++ b/doc/source/configuration/tables/manila-api.inc @@ -0,0 +1,54 @@ +.. _manila-api: + +.. list-table:: Description of API configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``admin_network_config_group`` = ``None`` + - (String) If share driver requires to setup admin network for share, then define network plugin config options in some separate config group and set its name here. Used only with another option 'driver_handles_share_servers' set to 'True'. 
+ * - ``admin_network_id`` = ``None`` + - (String) ID of neutron network used to communicate with admin network, to create additional admin export locations on. + * - ``admin_subnet_id`` = ``None`` + - (String) ID of neutron subnet used to communicate with admin network, to create additional admin export locations on. Related to 'admin_network_id'. + * - ``api_paste_config`` = ``api-paste.ini`` + - (String) File name for the paste.deploy config for manila-api. + * - ``api_rate_limit`` = ``True`` + - (Boolean) Whether to rate limit the API. + * - ``db_backend`` = ``sqlalchemy`` + - (String) The backend to use for database. + * - ``max_header_line`` = ``16384`` + - (Integer) Maximum line size of message headers to be accepted. Option max_header_line may need to be increased when using large tokens (typically those generated by the Keystone v3 API with big service catalogs). + * - ``osapi_max_limit`` = ``1000`` + - (Integer) The maximum number of items returned in a single response from a collection resource. + * - ``osapi_share_base_URL`` = ``None`` + - (String) Base URL to be presented to users in links to the Share API + * - ``osapi_share_ext_list`` = + - (List) Specify list of extensions to load when using osapi_share_extension option with manila.api.contrib.select_extensions. + * - ``osapi_share_extension`` = ``manila.api.contrib.standard_extensions`` + - (List) The osapi share extensions to load. + * - ``osapi_share_listen`` = ``::`` + - (String) IP address for OpenStack Share API to listen on. + * - ``osapi_share_listen_port`` = ``8786`` + - (Port number) Port for OpenStack Share API to listen on. + * - ``osapi_share_workers`` = ``1`` + - (Integer) Number of workers for OpenStack Share API service. + * - ``share_api_class`` = ``manila.share.api.API`` + - (String) The full class name of the share API class to use. + * - ``volume_api_class`` = ``manila.volume.cinder.API`` + - (String) The full class name of the Volume API class to use. 
+ * - ``volume_name_template`` = ``manila-share-%s`` + - (String) Volume name template. + * - ``volume_snapshot_name_template`` = ``manila-snapshot-%s`` + - (String) Volume snapshot name template. + * - **[oslo_middleware]** + - + * - ``enable_proxy_headers_parsing`` = ``False`` + - (Boolean) Whether the application is behind a proxy or not. This determines if the middleware should parse the headers or not. + * - ``max_request_body_size`` = ``114688`` + - (Integer) The maximum body size for each request, in bytes. + * - ``secure_proxy_ssl_header`` = ``X-Forwarded-Proto`` + - (String) DEPRECATED: The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by a SSL termination proxy. diff --git a/doc/source/configuration/tables/manila-ca.inc b/doc/source/configuration/tables/manila-ca.inc new file mode 100644 index 0000000000..0d034ab2a7 --- /dev/null +++ b/doc/source/configuration/tables/manila-ca.inc @@ -0,0 +1,26 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-ca: + +.. list-table:: Description of Certificate Authority configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``ssl_ca_file`` = ``None`` + - (String) CA certificate file to use to verify connecting clients. + * - ``ssl_cert_file`` = ``None`` + - (String) Certificate file to use when starting the server securely. + * - ``ssl_key_file`` = ``None`` + - (String) Private key file to use when starting the server securely. 
diff --git a/doc/source/configuration/tables/manila-cephfs.inc b/doc/source/configuration/tables/manila-cephfs.inc new file mode 100644 index 0000000000..76d0a62a6a --- /dev/null +++ b/doc/source/configuration/tables/manila-cephfs.inc @@ -0,0 +1,28 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-cephfs: + +.. list-table:: Description of CephFS share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``cephfs_auth_id`` = ``manila`` + - (String) The name of the ceph auth identity to use. + * - ``cephfs_cluster_name`` = ``None`` + - (String) The name of the cluster in use, if it is not the default ('ceph'). + * - ``cephfs_conf_path`` = + - (String) Fully qualified path to the ceph.conf file. + * - ``cephfs_enable_snapshots`` = ``False`` + - (Boolean) Whether to enable snapshots in this driver. diff --git a/doc/source/configuration/tables/manila-common.inc b/doc/source/configuration/tables/manila-common.inc new file mode 100644 index 0000000000..163488150a --- /dev/null +++ b/doc/source/configuration/tables/manila-common.inc @@ -0,0 +1,134 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. 
_manila-common: + +.. list-table:: Description of Common configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``check_hash`` = ``False`` + - (Boolean) Chooses whether hash of each file should be checked on data copying. + * - ``client_socket_timeout`` = ``900`` + - (Integer) Timeout for client connections socket operations. If an incoming connection is idle for this number of seconds it will be closed. A value of '0' means wait forever. + * - ``compute_api_class`` = ``manila.compute.nova.API`` + - (String) The full class name of the Compute API class to use. + * - ``data_access_wait_access_rules_timeout`` = ``180`` + - (Integer) Time to wait for access rules to be allowed/denied on backends when migrating a share (seconds). + * - ``data_manager`` = ``manila.data.manager.DataManager`` + - (String) Full class name for the data manager. + * - ``data_node_access_admin_user`` = ``None`` + - (String) The admin user name registered in the security service in order to allow access to user authentication-based shares. + * - ``data_node_access_cert`` = ``None`` + - (String) The certificate installed in the data node in order to allow access to certificate authentication-based shares. + * - ``data_node_access_ip`` = ``None`` + - (String) The IP of the node interface connected to the admin network. Used for allowing access to the mounting shares. + * - ``data_node_mount_options`` = ``{}`` + - (Dict) Mount options to be included in the mount command for share protocols. Use dictionary format, example: {'nfs': '-o nfsvers=3', 'cifs': '-o user=foo,pass=bar'} + * - ``data_topic`` = ``manila-data`` + - (String) The topic data nodes listen on. + * - ``enable_new_services`` = ``True`` + - (Boolean) Services to be added to the available pool on create. + * - ``fatal_exception_format_errors`` = ``False`` + - (Boolean) Whether to make exception message format errors fatal. 
+ * - ``filter_function`` = ``None`` + - (String) String representation for an equation that will be used to filter hosts. + * - ``host`` = ```` + - (String) Name of this node. This can be an opaque identifier. It is not necessarily a hostname, FQDN, or IP address. + * - ``max_over_subscription_ratio`` = ``20.0`` + - (Floating point) Float representation of the over subscription ratio when thin provisioning is involved. Default ratio is 20.0, meaning provisioned capacity can be 20 times the total physical capacity. If the ratio is 10.5, it means provisioned capacity can be 10.5 times the total physical capacity. A ratio of 1.0 means provisioned capacity cannot exceed the total physical capacity. A ratio lower than 1.0 is invalid. + * - ``memcached_servers`` = ``None`` + - (List) Memcached servers or None for in process cache. + * - ``monkey_patch`` = ``False`` + - (Boolean) Whether to log monkey patching. + * - ``monkey_patch_modules`` = + - (List) List of modules or decorators to monkey patch. + * - ``mount_tmp_location`` = ``/tmp/`` + - (String) Temporary path to create and mount shares during migration. + * - ``my_ip`` = ```` + - (String) IP address of this host. + * - ``num_shell_tries`` = ``3`` + - (Integer) Number of times to attempt to run flakey shell commands. + * - ``periodic_fuzzy_delay`` = ``60`` + - (Integer) Range of seconds to randomly delay when starting the periodic task scheduler to reduce stampeding. (Disable by setting to 0) + * - ``periodic_hooks_interval`` = ``300.0`` + - (Floating point) Interval in seconds between execution of periodic hooks. Used when option 'enable_periodic_hooks' is set to True. Default is 300. + * - ``periodic_interval`` = ``60`` + - (Integer) Seconds between running periodic tasks. + * - ``replica_state_update_interval`` = ``300`` + - (Integer) This value, specified in seconds, determines how often the share manager will poll for the health (replica_state) of each replica instance. 
+ * - ``replication_domain`` = ``None`` + - (String) A string specifying the replication domain that the backend belongs to. This option needs to be specified the same in the configuration sections of all backends that support replication between each other. If this option is not specified in the group, it means that replication is not enabled on the backend. + * - ``report_interval`` = ``10`` + - (Integer) Seconds between nodes reporting state to datastore. + * - ``reserved_share_percentage`` = ``0`` + - (Integer) The percentage of backend capacity reserved. + * - ``rootwrap_config`` = ``None`` + - (String) Path to the rootwrap configuration file to use for running commands as root. + * - ``service_down_time`` = ``60`` + - (Integer) Maximum time since last check-in for up service. + * - ``smb_template_config_path`` = ``$state_path/smb.conf`` + - (String) Path to smb config. + * - ``sql_idle_timeout`` = ``3600`` + - (Integer) Timeout before idle SQL connections are reaped. + * - ``sql_max_retries`` = ``10`` + - (Integer) Maximum database connection retries during startup. (setting -1 implies an infinite retry count). + * - ``sql_retry_interval`` = ``10`` + - (Integer) Interval between retries of opening a SQL connection. + * - ``sqlite_clean_db`` = ``clean.sqlite`` + - (String) File name of clean sqlite database. + * - ``sqlite_db`` = ``manila.sqlite`` + - (String) The filename to use with sqlite. + * - ``sqlite_synchronous`` = ``True`` + - (Boolean) If passed, use synchronous mode for sqlite. + * - ``state_path`` = ``/var/lib/manila`` + - (String) Top-level directory for maintaining manila's state. + * - ``storage_availability_zone`` = ``nova`` + - (String) Availability zone of this node. + * - ``tcp_keepalive`` = ``True`` + - (Boolean) Sets the value of TCP_KEEPALIVE (True/False) for each server socket. + * - ``tcp_keepalive_count`` = ``None`` + - (Integer) Sets the value of TCP_KEEPCNT for each server socket. Not supported on OS X. 
+ * - ``tcp_keepalive_interval`` = ``None`` + - (Integer) Sets the value of TCP_KEEPINTVL in seconds for each server socket. Not supported on OS X. + * - ``tcp_keepidle`` = ``600`` + - (Integer) Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not supported on OS X. + * - ``until_refresh`` = ``0`` + - (Integer) Count of reservations until usage is refreshed. + * - ``use_forwarded_for`` = ``False`` + - (Boolean) Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy. + * - ``wsgi_keep_alive`` = ``True`` + - (Boolean) If False, closes the client socket connection explicitly. Setting it to True to maintain backward compatibility. Recommended setting is set it to False. + * - **[coordination]** + - + * - ``backend_url`` = ``file://$state_path`` + - (String) The back end URL to use for distributed coordination. + * - ``heartbeat`` = ``1.0`` + - (Floating point) Number of seconds between heartbeats for distributed coordination. + * - ``initial_reconnect_backoff`` = ``0.1`` + - (Floating point) Initial number of seconds to wait after failed reconnection. + * - ``max_reconnect_backoff`` = ``60.0`` + - (Floating point) Maximum number of seconds between sequential reconnection retries. + * - **[healthcheck]** + - + * - ``backends`` = + - (List) Additional backends that can perform health checks and report that information back as part of a request. + * - ``detailed`` = ``False`` + - (Boolean) Show more detailed information as part of the response + * - ``disable_by_file_path`` = ``None`` + - (String) Check the presence of a file to determine if an application is running on a port. Used by DisableByFileHealthcheck plugin. + * - ``disable_by_file_paths`` = + - (List) Check the presence of a file based on a port to determine if an application is running on a port. Expects a "port:path" list of strings. Used by DisableByFilesPortsHealthcheck plugin. 
+ * - ``path`` = ``/healthcheck`` + - (String) DEPRECATED: The path to respond to healthcheck requests on. diff --git a/doc/source/configuration/tables/manila-compute.inc b/doc/source/configuration/tables/manila-compute.inc new file mode 100644 index 0000000000..a565217145 --- /dev/null +++ b/doc/source/configuration/tables/manila-compute.inc @@ -0,0 +1,34 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-compute: + +.. list-table:: Description of Compute configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``nova_admin_auth_url`` = ``http://localhost:5000/v2.0`` + - (String) DEPRECATED: Identity service URL. This option isn't used any longer. Please use [nova] url instead. + * - ``nova_admin_password`` = ``None`` + - (String) DEPRECATED: Nova admin password. This option isn't used any longer. Please use [nova] password instead. + * - ``nova_admin_tenant_name`` = ``service`` + - (String) DEPRECATED: Nova admin tenant name. This option isn't used any longer. Please use [nova] tenant instead. + * - ``nova_admin_username`` = ``nova`` + - (String) DEPRECATED: Nova admin username. This option isn't used any longer. Please use [nova] username instead. + * - ``nova_catalog_admin_info`` = ``compute:nova:adminURL`` + - (String) DEPRECATED: Same as nova_catalog_info, but for admin endpoint. This option isn't used any longer. + * - ``nova_catalog_info`` = ``compute:nova:publicURL`` + - (String) DEPRECATED: Info to match when looking for nova in the service catalog. 
Format is separated values of the form: <service_type>:<service_name>:<endpoint_type>. This option isn't used any longer. + * - ``os_region_name`` = ``None`` + - (String) Region name of this node. diff --git a/doc/source/configuration/tables/manila-emc.inc b/doc/source/configuration/tables/manila-emc.inc new file mode 100644 index 0000000000..d8dce430b8 --- /dev/null +++ b/doc/source/configuration/tables/manila-emc.inc @@ -0,0 +1,36 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-emc: + +.. list-table:: Description of EMC share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``emc_nas_login`` = ``None`` + - (String) User name for the EMC server. + * - ``emc_nas_password`` = ``None`` + - (String) Password for the EMC server. + * - ``emc_nas_root_dir`` = ``None`` + - (String) The root directory where shares will be located. + * - ``emc_nas_server`` = ``None`` + - (String) EMC server hostname or IP address. + * - ``emc_nas_server_container`` = ``None`` + - (String) DEPRECATED: Storage processor to host the NAS server. Obsolete. Unity driver supports nas server auto load balance. + * - ``emc_nas_server_port`` = ``8080`` + - (Port number) Port number for the EMC server. + * - ``emc_nas_server_secure`` = ``True`` + - (Boolean) Use secure connection to server. + * - ``emc_share_backend`` = ``None`` + - (String) Share backend. 
diff --git a/doc/source/configuration/tables/manila-ganesha.inc b/doc/source/configuration/tables/manila-ganesha.inc new file mode 100644 index 0000000000..ee74537421 --- /dev/null +++ b/doc/source/configuration/tables/manila-ganesha.inc @@ -0,0 +1,34 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-ganesha: + +.. list-table:: Description of Ganesha configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``ganesha_config_dir`` = ``/etc/ganesha`` + - (String) Directory where Ganesha config files are stored. + * - ``ganesha_config_path`` = ``$ganesha_config_dir/ganesha.conf`` + - (String) Path to main Ganesha config file. + * - ``ganesha_db_path`` = ``$state_path/manila-ganesha.db`` + - (String) Location of Ganesha database file. (Ganesha module only.) + * - ``ganesha_export_dir`` = ``$ganesha_config_dir/export.d`` + - (String) Path to directory containing Ganesha export configuration. (Ganesha module only.) + * - ``ganesha_export_template_dir`` = ``/etc/manila/ganesha-export-templ.d`` + - (String) Path to directory containing Ganesha export block templates. (Ganesha module only.) + * - ``ganesha_nfs_export_options`` = ``maxread = 65536, prefread = 65536`` + - (String) Options to use when exporting a share using ganesha NFS server. Note that these defaults can be overridden when a share is created by passing metadata with key name export_options. Also note the complete set of default ganesha export options is specified in ganesha_utils. (GPFS only.) 
+ * - ``ganesha_service_name`` = ``ganesha.nfsd`` + - (String) Name of the ganesha nfs service. diff --git a/doc/source/configuration/tables/manila-generic.inc b/doc/source/configuration/tables/manila-generic.inc new file mode 100644 index 0000000000..3d42239179 --- /dev/null +++ b/doc/source/configuration/tables/manila-generic.inc @@ -0,0 +1,168 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-generic: + +.. list-table:: Description of Generic share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``cinder_admin_auth_url`` = ``http://localhost:5000/v2.0`` + - (String) DEPRECATED: Identity service URL. This option isn't used any longer. Please use [cinder] auth_url instead. + * - ``cinder_admin_password`` = ``None`` + - (String) DEPRECATED: Cinder admin password. This option isn't used any longer. Please use [cinder] password instead. + * - ``cinder_admin_tenant_name`` = ``service`` + - (String) DEPRECATED: Cinder admin tenant name. This option isn't used any longer. Please use [cinder] tenant_name instead. + * - ``cinder_admin_username`` = ``cinder`` + - (String) DEPRECATED: Cinder admin username. This option isn't used any longer. Please use [cinder] username instead. + * - ``cinder_catalog_info`` = ``volume:cinder:publicURL`` + - (String) DEPRECATED: Info to match when looking for cinder in the service catalog. Format is separated values of the form: <service_type>:<service_name>:<endpoint_type>. This option isn't used any longer. 
+ * - ``cinder_volume_type`` = ``None`` + - (String) Name or id of cinder volume type which will be used for all volumes created by driver. + * - ``connect_share_server_to_tenant_network`` = ``False`` + - (Boolean) Attach share server directly to share network. Used only with Neutron and if driver_handles_share_servers=True. + * - ``container_volume_group`` = ``manila_docker_volumes`` + - (String) LVM volume group to use for volumes. This volume group must be created by the cloud administrator independently from manila operations. + * - ``driver_handles_share_servers`` = ``None`` + - (Boolean) There are two possible approaches for share drivers in Manila. First is when share driver is able to handle share-servers and second when not. Drivers can support either both or only one of these approaches. So, set this opt to True if share driver is able to handle share servers and it is desired mode else set False. It is set to None by default to make this choice intentional. + * - ``goodness_function`` = ``None`` + - (String) String representation for an equation that will be used to determine the goodness of a host. + * - ``interface_driver`` = ``manila.network.linux.interface.OVSInterfaceDriver`` + - (String) Vif driver. Used only with Neutron and if driver_handles_share_servers=True. + * - ``manila_service_keypair_name`` = ``manila-service`` + - (String) Keypair name that will be created and used for service instances. Only used if driver_handles_share_servers=True. + * - ``max_time_to_attach`` = ``120`` + - (Integer) Maximum time to wait for attaching cinder volume. + * - ``max_time_to_build_instance`` = ``300`` + - (Integer) Maximum time in seconds to wait for creating service instance. + * - ``max_time_to_create_volume`` = ``180`` + - (Integer) Maximum time to wait for creating cinder volume. + * - ``max_time_to_extend_volume`` = ``180`` + - (Integer) Maximum time to wait for extending cinder volume. 
+ * - ``ovs_integration_bridge`` = ``br-int`` + - (String) Name of Open vSwitch bridge to use. + * - ``path_to_private_key`` = ``None`` + - (String) Path to host's private key. + * - ``path_to_public_key`` = ``~/.ssh/id_rsa.pub`` + - (String) Path to hosts public key. Only used if driver_handles_share_servers=True. + * - ``protocol_access_mapping`` = ``{'ip': ['nfs'], 'user': ['cifs']}`` + - (Dict) Protocol access mapping for this backend. Should be a dictionary comprised of {'access_type1': ['share_proto1', 'share_proto2'], 'access_type2': ['share_proto2', 'share_proto3']}. + * - ``service_image_name`` = ``manila-service-image`` + - (String) Name of image in Glance, that will be used for service instance creation. Only used if driver_handles_share_servers=True. + * - ``service_instance_flavor_id`` = ``100`` + - (Integer) ID of flavor, that will be used for service instance creation. Only used if driver_handles_share_servers=True. + * - ``service_instance_name_or_id`` = ``None`` + - (String) Name or ID of service instance in Nova to use for share exports. Used only when share servers handling is disabled. + * - ``service_instance_name_template`` = ``manila_service_instance_%s`` + - (String) Name of service instance. Only used if driver_handles_share_servers=True. + * - ``service_instance_network_helper_type`` = ``neutron`` + - (String) DEPRECATED: Used to select between neutron and nova helpers when driver_handles_share_servers=True. Obsolete. This option isn't used any longer because nova networking is no longer supported. + * - ``service_instance_password`` = ``None`` + - (String) Password for service instance user. + * - ``service_instance_security_group`` = ``manila-service`` + - (String) Security group name, that will be used for service instance creation. Only used if driver_handles_share_servers=True. + * - ``service_instance_smb_config_path`` = ``$share_mount_path/smb.conf`` + - (String) Path to SMB config in service instance. 
+ * - ``service_instance_user`` = ``None`` + - (String) User in service instance that will be used for authentication. + * - ``service_net_name_or_ip`` = ``None`` + - (String) Can be either name of network that is used by service instance within Nova to get IP address or IP address itself for managing shares there. Used only when share servers handling is disabled. + * - ``service_network_cidr`` = ``10.254.0.0/16`` + - (String) CIDR of manila service network. Used only with Neutron and if driver_handles_share_servers=True. + * - ``service_network_division_mask`` = ``28`` + - (Integer) This mask is used for dividing service network into subnets, IP capacity of subnet with this mask directly defines possible amount of created service VMs per tenant's subnet. Used only with Neutron and if driver_handles_share_servers=True. + * - ``service_network_name`` = ``manila_service_network`` + - (String) Name of manila service network. Used only with Neutron. Only used if driver_handles_share_servers=True. + * - ``share_helpers`` = ``CIFS=manila.share.drivers.helpers.CIFSHelperIPAccess, NFS=manila.share.drivers.helpers.NFSHelper`` + - (List) Specify list of share export helpers. + * - ``share_mount_path`` = ``/shares`` + - (String) Parent path in service instance where shares will be mounted. + * - ``share_mount_template`` = ``mount -vt %(proto)s %(options)s %(export)s %(path)s`` + - (String) The template for mounting shares for this backend. Must specify the executable with all necessary parameters for the protocol supported. 'proto' template element may not be required if included in the command. 'export' and 'path' template elements are required. It is advisable to separate different commands per backend. + * - ``share_unmount_template`` = ``umount -v %(path)s`` + - (String) The template for unmounting shares for this backend. Must specify the executable with all necessary parameters for the protocol supported. 'path' template element is required. 
It is advisable to separate different commands per backend. + * - ``share_volume_fstype`` = ``ext4`` + - (String) Filesystem type of the share volume. + * - ``tenant_net_name_or_ip`` = ``None`` + - (String) Can be either name of network that is used by service instance within Nova to get IP address or IP address itself for exporting shares. Used only when share servers handling is disabled. + * - ``volume_name_template`` = ``manila-share-%s`` + - (String) Volume name template. + * - ``volume_snapshot_name_template`` = ``manila-snapshot-%s`` + - (String) Volume snapshot name template. + * - **[cinder]** + - + * - ``api_insecure`` = ``False`` + - (Boolean) Allow to perform insecure SSL requests to cinder. + * - ``auth_section`` = ``None`` + - (Unknown) Config Section from which to load plugin specific options + * - ``auth_type`` = ``None`` + - (Unknown) Authentication type to load + * - ``ca_certificates_file`` = ``None`` + - (String) Location of CA certificates file to use for cinder client requests. + * - ``cafile`` = ``None`` + - (String) PEM encoded Certificate Authority to use when verifying HTTPs connections. + * - ``certfile`` = ``None`` + - (String) PEM encoded client certificate cert file + * - ``cross_az_attach`` = ``True`` + - (Boolean) Allow attaching between instances and volumes in different availability zones. + * - ``endpoint_type`` = ``publicURL`` + - (String) Endpoint type to be used with cinder client calls. + * - ``http_retries`` = ``3`` + - (Integer) Number of cinderclient retries on failed HTTP calls. + * - ``insecure`` = ``False`` + - (Boolean) Verify HTTPS connections. + * - ``keyfile`` = ``None`` + - (String) PEM encoded client certificate key file + * - ``region_name`` = ``None`` + - (String) Region name for connecting to cinder. + * - ``timeout`` = ``None`` + - (Integer) Timeout value for http requests + * - **[neutron]** + - + * - ``cafile`` = ``None`` + - (String) PEM encoded Certificate Authority to use when verifying HTTPs connections. 
+ * - ``certfile`` = ``None`` + - (String) PEM encoded client certificate cert file + * - ``insecure`` = ``False`` + - (Boolean) Verify HTTPS connections. + * - ``keyfile`` = ``None`` + - (String) PEM encoded client certificate key file + * - ``timeout`` = ``None`` + - (Integer) Timeout value for http requests + * - **[nova]** + - + * - ``api_insecure`` = ``False`` + - (Boolean) Allow to perform insecure SSL requests to nova. + * - ``api_microversion`` = ``2.10`` + - (String) Version of Nova API to be used. + * - ``auth_section`` = ``None`` + - (Unknown) Config Section from which to load plugin specific options + * - ``auth_type`` = ``None`` + - (Unknown) Authentication type to load + * - ``ca_certificates_file`` = ``None`` + - (String) Location of CA certificates file to use for nova client requests. + * - ``cafile`` = ``None`` + - (String) PEM encoded Certificate Authority to use when verifying HTTPs connections. + * - ``certfile`` = ``None`` + - (String) PEM encoded client certificate cert file + * - ``endpoint_type`` = ``publicURL`` + - (String) Endpoint type to be used with nova client calls. + * - ``insecure`` = ``False`` + - (Boolean) Verify HTTPS connections. + * - ``keyfile`` = ``None`` + - (String) PEM encoded client certificate key file + * - ``region_name`` = ``None`` + - (String) Region name for connecting to nova. + * - ``timeout`` = ``None`` + - (Integer) Timeout value for http requests diff --git a/doc/source/configuration/tables/manila-glusterfs.inc b/doc/source/configuration/tables/manila-glusterfs.inc new file mode 100644 index 0000000000..02e3507ddb --- /dev/null +++ b/doc/source/configuration/tables/manila-glusterfs.inc @@ -0,0 +1,42 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. 
+ + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-glusterfs: + +.. list-table:: Description of GlusterFS share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``glusterfs_ganesha_server_ip`` = ``None`` + - (String) Remote Ganesha server node's IP address. + * - ``glusterfs_ganesha_server_password`` = ``None`` + - (String) Remote Ganesha server node's login password. This is not required if 'glusterfs_path_to_private_key' is configured. + * - ``glusterfs_ganesha_server_username`` = ``root`` + - (String) Remote Ganesha server node's username. + * - ``glusterfs_mount_point_base`` = ``$state_path/mnt`` + - (String) Base directory containing mount points for Gluster volumes. + * - ``glusterfs_nfs_server_type`` = ``Gluster`` + - (String) Type of NFS server that mediate access to the Gluster volumes (Gluster or Ganesha). + * - ``glusterfs_path_to_private_key`` = ``None`` + - (String) Path of Manila host's private SSH key file. + * - ``glusterfs_server_password`` = ``None`` + - (String) Remote GlusterFS server node's login password. This is not required if 'glusterfs_path_to_private_key' is configured. + * - ``glusterfs_servers`` = + - (List) List of GlusterFS servers that can be used to create shares. Each GlusterFS server should be of the form [remoteuser@]<volserver>, and they are assumed to belong to distinct Gluster clusters. + * - ``glusterfs_share_layout`` = ``None`` + - (String) Specifies GlusterFS share layout, that is, the method of associating backing GlusterFS resources to shares. + * - ``glusterfs_target`` = ``None`` + - (String) Specifies the GlusterFS volume to be mounted on the Manila host. It is of the form [remoteuser@]<volserver>:<volid>. 
+ * - ``glusterfs_volume_pattern`` = ``None`` + - (String) Regular expression template used to filter GlusterFS volumes for share creation. The regex template can optionally (ie. with support of the GlusterFS backend) contain the #{size} parameter which matches an integer (sequence of digits) in which case the value shall be interpreted as size of the volume in GB. Examples: "manila-share-volume-\d+$", "manila-share-volume-#{size}G-\d+$"; with matching volume names, respectively: "manila-share-volume-12", "manila-share-volume-3G-13". In latter example, the number that matches "#{size}", that is, 3, is an indication that the size of volume is 3G. diff --git a/doc/source/configuration/tables/manila-hdfs.inc b/doc/source/configuration/tables/manila-hdfs.inc new file mode 100644 index 0000000000..4e0b983c64 --- /dev/null +++ b/doc/source/configuration/tables/manila-hdfs.inc @@ -0,0 +1,32 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-hdfs: + +.. list-table:: Description of HDFS share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``hdfs_namenode_ip`` = ``None`` + - (String) The IP of the HDFS namenode. + * - ``hdfs_namenode_port`` = ``9000`` + - (Port number) The port of HDFS namenode service. + * - ``hdfs_ssh_name`` = ``None`` + - (String) HDFS namenode ssh login name. + * - ``hdfs_ssh_port`` = ``22`` + - (Port number) HDFS namenode SSH port. + * - ``hdfs_ssh_private_key`` = ``None`` + - (String) Path to HDFS namenode SSH private key for login. 
+ * - ``hdfs_ssh_pw`` = ``None`` + - (String) HDFS namenode SSH login password. This parameter is not necessary if 'hdfs_ssh_private_key' is configured. diff --git a/doc/source/configuration/tables/manila-hds_hnas.inc b/doc/source/configuration/tables/manila-hds_hnas.inc new file mode 100644 index 0000000000..75705c11cf --- /dev/null +++ b/doc/source/configuration/tables/manila-hds_hnas.inc @@ -0,0 +1,50 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-hds_hnas: + +.. list-table:: Description of HDS NAS share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``hitachi_hnas_admin_network_ip`` = ``None`` + - (String) Specify IP for mounting shares in the Admin network. + * - ``hitachi_hnas_allow_cifs_snapshot_while_mounted`` = ``False`` + - (Boolean) By default, CIFS snapshots are not allowed to be taken when the share has clients connected because consistent point-in-time replica cannot be guaranteed for all files. Enabling this might cause inconsistent snapshots on CIFS shares. + * - ``hitachi_hnas_cluster_admin_ip0`` = ``None`` + - (String) The IP of the clusters admin node. Only set in HNAS multinode clusters. + * - ``hitachi_hnas_driver_helper`` = ``manila.share.drivers.hitachi.hnas.ssh.HNASSSHBackend`` + - (String) Python class to be used for driver helper. + * - ``hitachi_hnas_evs_id`` = ``None`` + - (Integer) Specify which EVS this backend is assigned to. + * - ``hitachi_hnas_evs_ip`` = ``None`` + - (String) Specify IP for mounting shares. 
+ * - ``hitachi_hnas_file_system_name`` = ``None`` + - (String) Specify file-system name for creating shares. + * - ``hitachi_hnas_ip`` = ``None`` + - (String) HNAS management interface IP for communication between Manila controller and HNAS. + * - ``hitachi_hnas_password`` = ``None`` + - (String) HNAS user password. Required only if private key is not provided. + * - ``hitachi_hnas_ssh_private_key`` = ``None`` + - (String) RSA/DSA private key value used to connect into HNAS. Required only if password is not provided. + * - ``hitachi_hnas_stalled_job_timeout`` = ``30`` + - (Integer) The time (in seconds) to wait for stalled HNAS jobs before aborting. + * - ``hitachi_hnas_user`` = ``None`` + - (String) HNAS username Base64 String in order to perform tasks such as create file-systems and network interfaces. + * - **[hnas1]** + - + * - ``share_backend_name`` = ``None`` + - (String) The backend name for a given driver implementation. + * - ``share_driver`` = ``manila.share.drivers.generic.GenericShareDriver`` + - (String) Driver to use for share creation. diff --git a/doc/source/configuration/tables/manila-hds_hsp.inc b/doc/source/configuration/tables/manila-hds_hsp.inc new file mode 100644 index 0000000000..cf8b696573 --- /dev/null +++ b/doc/source/configuration/tables/manila-hds_hsp.inc @@ -0,0 +1,24 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-hds_hsp: + +.. 
list-table:: Description of HDS HSP share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[hsp1]** + - + * - ``share_backend_name`` = ``None`` + - (String) The backend name for a given driver implementation. + * - ``share_driver`` = ``manila.share.drivers.generic.GenericShareDriver`` + - (String) Driver to use for share creation. diff --git a/doc/source/configuration/tables/manila-hnas.inc b/doc/source/configuration/tables/manila-hnas.inc new file mode 100644 index 0000000000..39d5a67102 --- /dev/null +++ b/doc/source/configuration/tables/manila-hnas.inc @@ -0,0 +1,22 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-hnas: + +.. list-table:: Description of hnas configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``hds_hnas_driver_helper`` = ``manila.share.drivers.hitachi.ssh.HNASSSHBackend`` + - (String) Python class to be used for driver helper. diff --git a/doc/source/configuration/tables/manila-hpe3par.inc b/doc/source/configuration/tables/manila-hpe3par.inc new file mode 100644 index 0000000000..e42f07a817 --- /dev/null +++ b/doc/source/configuration/tables/manila-hpe3par.inc @@ -0,0 +1,50 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. 
+ + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-hpe3par: + +.. list-table:: Description of HPE 3PAR share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``hpe3par_api_url`` = + - (String) 3PAR WSAPI Server Url like https://<3par ip>:8080/api/v1 + * - ``hpe3par_cifs_admin_access_domain`` = ``LOCAL_CLUSTER`` + - (String) File system domain for the CIFS admin user. + * - ``hpe3par_cifs_admin_access_password`` = + - (String) File system admin password for CIFS. + * - ``hpe3par_cifs_admin_access_username`` = + - (String) File system admin user name for CIFS. + * - ``hpe3par_debug`` = ``False`` + - (Boolean) Enable HTTP debugging to 3PAR + * - ``hpe3par_fpg`` = ``None`` + - (Unknown) The File Provisioning Group (FPG) to use + * - ``hpe3par_fstore_per_share`` = ``False`` + - (Boolean) Use one filestore per share + * - ``hpe3par_password`` = + - (String) 3PAR password for the user specified in hpe3par_username + * - ``hpe3par_require_cifs_ip`` = ``False`` + - (Boolean) Require IP access rules for CIFS (in addition to user) + * - ``hpe3par_san_ip`` = + - (String) IP address of SAN controller + * - ``hpe3par_san_login`` = + - (String) Username for SAN controller + * - ``hpe3par_san_password`` = + - (String) Password for SAN controller + * - ``hpe3par_san_ssh_port`` = ``22`` + - (Port number) SSH port to use with SAN + * - ``hpe3par_share_mount_path`` = ``/mnt/`` + - (String) The path where shares will be mounted when deleting nested file trees. 
+ * - ``hpe3par_username`` = + - (String) 3PAR username with the 'edit' role diff --git a/doc/source/configuration/tables/manila-huawei.inc b/doc/source/configuration/tables/manila-huawei.inc new file mode 100644 index 0000000000..0d29b62d80 --- /dev/null +++ b/doc/source/configuration/tables/manila-huawei.inc @@ -0,0 +1,22 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-huawei: + +.. list-table:: Description of Huawei share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``manila_huawei_conf_file`` = ``/etc/manila/manila_huawei_conf.xml`` + - (String) The configuration file for the Manila Huawei driver. diff --git a/doc/source/configuration/tables/manila-lvm.inc b/doc/source/configuration/tables/manila-lvm.inc new file mode 100644 index 0000000000..2c9857a5ad --- /dev/null +++ b/doc/source/configuration/tables/manila-lvm.inc @@ -0,0 +1,30 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-lvm: + +.. 
list-table:: Description of LVM share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``lvm_share_export_ip`` = ``None`` + - (String) IP to be added to export string. + * - ``lvm_share_export_root`` = ``$state_path/mnt`` + - (String) Base folder where exported shares are located. + * - ``lvm_share_helpers`` = ``CIFS=manila.share.drivers.helpers.CIFSHelperUserAccess, NFS=manila.share.drivers.helpers.NFSHelper`` + - (List) Specify list of share export helpers. + * - ``lvm_share_mirrors`` = ``0`` + - (Integer) If set, create LVMs with multiple mirrors. Note that this requires lvm_mirrors + 2 PVs with available space. + * - ``lvm_share_volume_group`` = ``lvm-shares`` + - (String) Name for the VG that will contain exported shares. diff --git a/doc/source/configuration/tables/manila-maprfs.inc b/doc/source/configuration/tables/manila-maprfs.inc new file mode 100644 index 0000000000..33fa871088 --- /dev/null +++ b/doc/source/configuration/tables/manila-maprfs.inc @@ -0,0 +1,38 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-maprfs: + +.. list-table:: Description of MapRFS share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``maprfs_base_volume_dir`` = ``/`` + - (String) Path in MapRFS where share volumes must be created. + * - ``maprfs_cldb_ip`` = ``None`` + - (List) The list of IPs or hostnames of CLDB nodes. 
+ * - ``maprfs_clinode_ip`` = ``None`` + - (List) The list of IPs or hostnames of nodes where mapr-core is installed. + * - ``maprfs_rename_managed_volume`` = ``True`` + - (Boolean) Specify whether existing volume should be renamed when start managing. + * - ``maprfs_ssh_name`` = ``mapr`` + - (String) Cluster admin user ssh login name. + * - ``maprfs_ssh_port`` = ``22`` + - (Port number) CLDB node SSH port. + * - ``maprfs_ssh_private_key`` = ``None`` + - (String) Path to SSH private key for login. + * - ``maprfs_ssh_pw`` = ``None`` + - (String) Cluster node SSH login password. This parameter is not necessary if 'maprfs_ssh_private_key' is configured. + * - ``maprfs_zookeeper_ip`` = ``None`` + - (List) The list of IPs or hostnames of ZooKeeper nodes. diff --git a/doc/source/configuration/tables/manila-netapp.inc b/doc/source/configuration/tables/manila-netapp.inc new file mode 100644 index 0000000000..36ea6c4468 --- /dev/null +++ b/doc/source/configuration/tables/manila-netapp.inc @@ -0,0 +1,56 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-netapp: + +.. list-table:: Description of NetApp share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``netapp_aggregate_name_search_pattern`` = ``(.*)`` + - (String) Pattern for searching available aggregates for provisioning. + * - ``netapp_enabled_share_protocols`` = ``nfs3, nfs4.0`` + - (List) The NFS protocol versions that will be enabled. Supported values include nfs3, nfs4.0, nfs4.1. 
This option only applies when the option driver_handles_share_servers is set to True. + * - ``netapp_lif_name_template`` = ``os_%(net_allocation_id)s`` + - (String) Logical interface (LIF) name template + * - ``netapp_login`` = ``None`` + - (String) Administrative user account name used to access the storage system. + * - ``netapp_password`` = ``None`` + - (String) Password for the administrative user account specified in the netapp_login option. + * - ``netapp_port_name_search_pattern`` = ``(.*)`` + - (String) Pattern for overriding the selection of network ports on which to create Vserver LIFs. + * - ``netapp_root_volume`` = ``root`` + - (String) Root volume name. + * - ``netapp_root_volume_aggregate`` = ``None`` + - (String) Name of aggregate to create Vserver root volumes on. This option only applies when the option driver_handles_share_servers is set to True. + * - ``netapp_server_hostname`` = ``None`` + - (String) The hostname (or IP address) for the storage system. + * - ``netapp_server_port`` = ``None`` + - (Port number) The TCP port to use for communication with the storage system or proxy server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for HTTPS. + * - ``netapp_snapmirror_quiesce_timeout`` = ``3600`` + - (Integer) The maximum time in seconds to wait for existing snapmirror transfers to complete before aborting when promoting a replica. + * - ``netapp_storage_family`` = ``ontap_cluster`` + - (String) The storage family type used on the storage system; valid values include ontap_cluster for using clustered Data ONTAP. + * - ``netapp_trace_flags`` = ``None`` + - (String) Comma-separated list of options that control which trace info is written to the debug logs. Values include method and api. + * - ``netapp_transport_type`` = ``http`` + - (String) The transport protocol used when communicating with the storage system or proxy server. Valid values are http or https. 
+ * - ``netapp_volume_move_cutover_timeout`` = ``3600`` + - (Integer) The maximum time in seconds to wait for the completion of a volume move operation after the cutover was triggered. + * - ``netapp_volume_name_template`` = ``share_%(share_id)s`` + - (String) NetApp volume name template. + * - ``netapp_volume_snapshot_reserve_percent`` = ``5`` + - (Integer) The percentage of share space set aside as reserve for snapshot usage; valid values range from 0 to 90. + * - ``netapp_vserver_name_template`` = ``os_%s`` + - (String) Name template to use for new Vserver. diff --git a/doc/source/configuration/tables/manila-quobyte.inc b/doc/source/configuration/tables/manila-quobyte.inc new file mode 100644 index 0000000000..5e293b3999 --- /dev/null +++ b/doc/source/configuration/tables/manila-quobyte.inc @@ -0,0 +1,36 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-quobyte: + +.. list-table:: Description of Quobyte share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``quobyte_api_ca`` = ``None`` + - (String) The X.509 CA file to verify the server cert. + * - ``quobyte_api_password`` = ``quobyte`` + - (String) Password for Quobyte API server + * - ``quobyte_api_url`` = ``None`` + - (String) URL of the Quobyte API server (http or https) + * - ``quobyte_api_username`` = ``admin`` + - (String) Username for Quobyte API server. + * - ``quobyte_default_volume_group`` = ``root`` + - (String) Default owning group for new volumes. 
+ * - ``quobyte_default_volume_user`` = ``root`` + - (String) Default owning user for new volumes. + * - ``quobyte_delete_shares`` = ``False`` + - (Boolean) Actually deletes shares (vs. unexport) + * - ``quobyte_volume_configuration`` = ``BASE`` + - (String) Name of volume configuration used for new shares. diff --git a/doc/source/configuration/tables/manila-quota.inc b/doc/source/configuration/tables/manila-quota.inc new file mode 100644 index 0000000000..b96b4fcefa --- /dev/null +++ b/doc/source/configuration/tables/manila-quota.inc @@ -0,0 +1,38 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-quota: + +.. list-table:: Description of Quota configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``max_age`` = ``0`` + - (Integer) Number of seconds between subsequent usage refreshes. + * - ``max_gigabytes`` = ``10000`` + - (Integer) Maximum number of volume gigabytes to allow per host. + * - ``quota_driver`` = ``manila.quota.DbQuotaDriver`` + - (String) Default driver to use for quota checks. + * - ``quota_gigabytes`` = ``1000`` + - (Integer) Number of share gigabytes allowed per project. + * - ``quota_share_networks`` = ``10`` + - (Integer) Number of share-networks allowed per project. + * - ``quota_shares`` = ``50`` + - (Integer) Number of shares allowed per project. + * - ``quota_snapshot_gigabytes`` = ``1000`` + - (Integer) Number of snapshot gigabytes allowed per project. + * - ``quota_snapshots`` = ``50`` + - (Integer) Number of share snapshots allowed per project. 
+ * - ``reservation_expire`` = ``86400`` + - (Integer) Number of seconds until a reservation expires. diff --git a/doc/source/configuration/tables/manila-redis.inc b/doc/source/configuration/tables/manila-redis.inc new file mode 100644 index 0000000000..33df98457e --- /dev/null +++ b/doc/source/configuration/tables/manila-redis.inc @@ -0,0 +1,36 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-redis: + +.. list-table:: Description of Redis configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[matchmaker_redis]** + - + * - ``check_timeout`` = ``20000`` + - (Integer) Time in ms to wait before the transaction is killed. + * - ``host`` = ``127.0.0.1`` + - (String) DEPRECATED: Host to locate redis. Replaced by [DEFAULT]/transport_url + * - ``password`` = + - (String) DEPRECATED: Password for Redis server (optional). Replaced by [DEFAULT]/transport_url + * - ``port`` = ``6379`` + - (Port number) DEPRECATED: Use this port to connect to redis host. Replaced by [DEFAULT]/transport_url + * - ``sentinel_group_name`` = ``oslo-messaging-zeromq`` + - (String) Redis replica set name. + * - ``sentinel_hosts`` = + - (List) DEPRECATED: List of Redis Sentinel hosts (fault tolerance mode), e.g., [host:port, host1:port ... ] Replaced by [DEFAULT]/transport_url + * - ``socket_timeout`` = ``10000`` + - (Integer) Timeout in ms on blocking socket operations. + * - ``wait_timeout`` = ``2000`` + - (Integer) Time in ms to wait between connection attempts. 
diff --git a/doc/source/configuration/tables/manila-san.inc b/doc/source/configuration/tables/manila-san.inc new file mode 100644 index 0000000000..812aef2564 --- /dev/null +++ b/doc/source/configuration/tables/manila-san.inc @@ -0,0 +1,26 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-san: + +.. list-table:: Description of SAN configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``ssh_conn_timeout`` = ``60`` + - (Integer) Backend server SSH connection timeout. + * - ``ssh_max_pool_conn`` = ``10`` + - (Integer) Maximum number of connections in the SSH pool. + * - ``ssh_min_pool_conn`` = ``1`` + - (Integer) Minimum number of connections in the SSH pool. diff --git a/doc/source/configuration/tables/manila-scheduler.inc b/doc/source/configuration/tables/manila-scheduler.inc new file mode 100644 index 0000000000..6c23cad475 --- /dev/null +++ b/doc/source/configuration/tables/manila-scheduler.inc @@ -0,0 +1,40 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-scheduler: + +.. 
list-table:: Description of Scheduler configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``capacity_weight_multiplier`` = ``1.0`` + - (Floating point) Multiplier used for weighing share capacity. Negative numbers mean to stack vs spread. + * - ``pool_weight_multiplier`` = ``1.0`` + - (Floating point) Multiplier used for weighing pools which have existing share servers. Negative numbers mean to spread vs stack. + * - ``scheduler_default_filters`` = ``AvailabilityZoneFilter, CapacityFilter, CapabilitiesFilter, DriverFilter, ShareReplicationFilter`` + - (List) Which filter class names to use for filtering hosts when not specified in the request. + * - ``scheduler_default_weighers`` = ``CapacityWeigher, GoodnessWeigher`` + - (List) Which weigher class names to use for weighing hosts. + * - ``scheduler_driver`` = ``manila.scheduler.drivers.filter.FilterScheduler`` + - (String) Default scheduler driver to use. + * - ``scheduler_host_manager`` = ``manila.scheduler.host_manager.HostManager`` + - (String) The scheduler host manager class to use. + * - ``scheduler_json_config_location`` = + - (String) Absolute path to scheduler configuration JSON file. + * - ``scheduler_manager`` = ``manila.scheduler.manager.SchedulerManager`` + - (String) Full class name for the scheduler manager. + * - ``scheduler_max_attempts`` = ``3`` + - (Integer) Maximum number of attempts to schedule a share. + * - ``scheduler_topic`` = ``manila-scheduler`` + - (String) The topic scheduler nodes listen on. diff --git a/doc/source/configuration/tables/manila-share.inc b/doc/source/configuration/tables/manila-share.inc new file mode 100644 index 0000000000..ed29ad519f --- /dev/null +++ b/doc/source/configuration/tables/manila-share.inc @@ -0,0 +1,80 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. 
+ + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-share: + +.. list-table:: Description of Share configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``automatic_share_server_cleanup`` = ``True`` + - (Boolean) If set to True, then Manila will delete all share servers which were unused more than specified time .If set to False - automatic deletion of share servers will be disabled. + * - ``backlog`` = ``4096`` + - (Integer) Number of backlog requests to configure the socket with. + * - ``default_share_group_type`` = ``None`` + - (String) Default share group type to use. + * - ``default_share_type`` = ``None`` + - (String) Default share type to use. + * - ``delete_share_server_with_last_share`` = ``False`` + - (Boolean) Whether share servers will be deleted on deletion of the last share. + * - ``driver_handles_share_servers`` = ``None`` + - (Boolean) There are two possible approaches for share drivers in Manila. First is when share driver is able to handle share-servers and second when not. Drivers can support either both or only one of these approaches. So, set this opt to True if share driver is able to handle share servers and it is desired mode else set False. It is set to None by default to make this choice intentional. + * - ``enable_periodic_hooks`` = ``False`` + - (Boolean) Whether to enable periodic hooks or not. + * - ``enable_post_hooks`` = ``False`` + - (Boolean) Whether to enable post hooks or not. + * - ``enable_pre_hooks`` = ``False`` + - (Boolean) Whether to enable pre hooks or not. + * - ``enabled_share_backends`` = ``None`` + - (List) A list of share backend names to use. 
These backend names should be backed by a unique [CONFIG] group with its options. + * - ``enabled_share_protocols`` = ``NFS, CIFS`` + - (List) Specify list of protocols to be allowed for share creation. Available values are '('NFS', 'CIFS', 'GLUSTERFS', 'HDFS', 'CEPHFS', 'MAPRFS')' + * - ``executor_thread_pool_size`` = ``64`` + - (Integer) Size of executor thread pool. + * - ``hook_drivers`` = + - (List) Driver(s) to perform some additional actions before and after share driver actions and on a periodic basis. Default is []. + * - ``migration_create_delete_share_timeout`` = ``300`` + - (Integer) Timeout for creating and deleting share instances when performing share migration (seconds). + * - ``migration_driver_continue_update_interval`` = ``60`` + - (Integer) This value, specified in seconds, determines how often the share manager will poll the driver to perform the next step of migration in the storage backend, for a migrating share. + * - ``migration_ignore_files`` = ``lost+found`` + - (List) List of files and folders to be ignored when migrating shares. Items should be names (not including any path). + * - ``migration_readonly_rules_support`` = ``True`` + - (Boolean) DEPRECATED: Specify whether read only access rule mode is supported in this backend. Obsolete. All drivers are now required to support read-only access rules. + * - ``migration_wait_access_rules_timeout`` = ``180`` + - (Integer) Time to wait for access rules to be allowed/denied on backends when migrating shares using generic approach (seconds). + * - ``network_config_group`` = ``None`` + - (String) Name of the configuration group in the Manila conf file to look for network config options.If not set, the share backend's config group will be used.If an option is not found within provided group, then'DEFAULT' group will be used for search of option. + * - ``root_helper`` = ``sudo`` + - (String) Deprecated: command to use for running commands as root. 
+ * - ``share_manager`` = ``manila.share.manager.ShareManager`` + - (String) Full class name for the share manager. + * - ``share_name_template`` = ``share-%s`` + - (String) Template string to be used to generate share names. + * - ``share_snapshot_name_template`` = ``share-snapshot-%s`` + - (String) Template string to be used to generate share snapshot names. + * - ``share_topic`` = ``manila-share`` + - (String) The topic share nodes listen on. + * - ``share_usage_audit_period`` = ``month`` + - (String) Time period to generate share usages for. Time period must be hour, day, month or year. + * - ``suppress_post_hooks_errors`` = ``False`` + - (Boolean) Whether to suppress post hook errors (allow driver's results to pass through) or not. + * - ``suppress_pre_hooks_errors`` = ``False`` + - (Boolean) Whether to suppress pre hook errors (allow driver perform actions) or not. + * - ``unmanage_remove_access_rules`` = ``False`` + - (Boolean) If set to True, then manila will deny access and remove all access rules on share unmanage. If set to False - nothing will be changed. + * - ``unused_share_server_cleanup_interval`` = ``10`` + - (Integer) Unallocated share servers reclamation time interval (minutes). Minimum value is 10 minutes, maximum is 60 minutes. The reclamation function is run every 10 minutes and deletes share servers which were unused more than unused_share_server_cleanup_interval option defines. This value reflects the shortest time Manila will wait for a share server to go unutilized before deleting it. + * - ``use_scheduler_creating_share_from_snapshot`` = ``False`` + - (Boolean) If set to False, then share creation from snapshot will be performed on the same host. If set to True, then scheduling step will be used. 
diff --git a/doc/source/configuration/tables/manila-spectrumscale_ces.inc b/doc/source/configuration/tables/manila-spectrumscale_ces.inc new file mode 100644 index 0000000000..3cc13a940d --- /dev/null +++ b/doc/source/configuration/tables/manila-spectrumscale_ces.inc @@ -0,0 +1,57 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-spectrumscale_ces: + +.. list-table:: Description of IBM Spectrum Scale CES share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + + * - **[DEFAULT]** + - + + * - ``gpfs_mount_point_base`` = ``$state_path/mnt`` + + - (String) Base folder where exported shares are located. + + * - ``gpfs_nfs_server_type`` = ``CES`` + + - (String) NFS Server type. Valid choices are "CES" (Ganesha NFS) or "KNFS" (Kernel NFS). + + * - ``gpfs_share_export_ip`` = ``None`` + + - (Host address) IP to be added to GPFS export string. + + * - ``gpfs_share_helpers`` = ``KNFS=manila.share.drivers.ibm.gpfs.KNFSHelper, CES=manila.share.drivers.ibm.gpfs.CESHelper`` + + - (List) Specify list of share export helpers. + + * - ``gpfs_ssh_login`` = ``None`` + + - (String) GPFS server SSH login name. + + * - ``gpfs_ssh_password`` = ``None`` + + - (String) GPFS server SSH login password. The password is not needed, if 'gpfs_ssh_private_key' is configured. + + * - ``gpfs_ssh_port`` = ``22`` + + - (Port number) GPFS server SSH port. + + * - ``gpfs_ssh_private_key`` = ``None`` + + - (String) Path to GPFS server SSH private key for login. 
+ + * - ``is_gpfs_node`` = ``False`` + + - (Boolean) True:when Manila services are running on one of the Spectrum Scale node. False:when Manila services are not running on any of the Spectrum Scale node. diff --git a/doc/source/configuration/tables/manila-spectrumscale_knfs.inc b/doc/source/configuration/tables/manila-spectrumscale_knfs.inc new file mode 100644 index 0000000000..6db218e90b --- /dev/null +++ b/doc/source/configuration/tables/manila-spectrumscale_knfs.inc @@ -0,0 +1,61 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-spectrumscale_knfs: + +.. list-table:: Description of IBM Spectrum Scale KNFS share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + + * - **[DEFAULT]** + - + + * - ``gpfs_mount_point_base`` = ``$state_path/mnt`` + + - (String) Base folder where exported shares are located. + + * - ``gpfs_nfs_server_list`` = ``None`` + + - (List) A list of the fully qualified NFS server names that make up the OpenStack Manila configuration. + + * - ``gpfs_nfs_server_type`` = ``CES`` + + - (String) NFS Server type. Valid choices are "CES" (Ganesha NFS) or "KNFS" (Kernel NFS). + + * - ``gpfs_share_export_ip`` = ``None`` + + - (Host address) IP to be added to GPFS export string. + + * - ``gpfs_share_helpers`` = ``KNFS=manila.share.drivers.ibm.gpfs.KNFSHelper, CES=manila.share.drivers.ibm.gpfs.CESHelper`` + + - (List) Specify list of share export helpers. + + * - ``gpfs_ssh_login`` = ``None`` + + - (String) GPFS server SSH login name. 
+ + * - ``gpfs_ssh_password`` = ``None`` + + - (String) GPFS server SSH login password. The password is not needed, if 'gpfs_ssh_private_key' is configured. + + * - ``gpfs_ssh_port`` = ``22`` + + - (Port number) GPFS server SSH port. + + * - ``gpfs_ssh_private_key`` = ``None`` + + - (String) Path to GPFS server SSH private key for login. + + * - ``is_gpfs_node`` = ``False`` + + - (Boolean) True:when Manila services are running on one of the Spectrum Scale node. False:when Manila services are not running on any of the Spectrum Scale node. diff --git a/doc/source/configuration/tables/manila-tegile.inc b/doc/source/configuration/tables/manila-tegile.inc new file mode 100644 index 0000000000..0b5ceb92cc --- /dev/null +++ b/doc/source/configuration/tables/manila-tegile.inc @@ -0,0 +1,28 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-tegile: + +.. list-table:: Description of Tegile share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``tegile_default_project`` = ``None`` + - (String) Create shares in this project + * - ``tegile_nas_login`` = ``None`` + - (String) User name for the Tegile NAS server. + * - ``tegile_nas_password`` = ``None`` + - (String) Password for the Tegile NAS server. + * - ``tegile_nas_server`` = ``None`` + - (String) Tegile NAS server hostname or IP address. 
diff --git a/doc/source/configuration/tables/manila-unity.inc b/doc/source/configuration/tables/manila-unity.inc new file mode 100644 index 0000000000..f6dd5603e2 --- /dev/null +++ b/doc/source/configuration/tables/manila-unity.inc @@ -0,0 +1,26 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-unity: + +.. list-table:: Description of Dell EMC Unity share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``unity_ethernet_ports`` = ``None`` + - (List) Comma separated list of ports that can be used for share server interfaces. Members of the list can be Unix-style glob expressions. + * - ``unity_server_meta_pool`` = ``None`` + - (String) Pool to persist the meta-data of NAS server. + * - ``unity_share_data_pools`` = ``None`` + - (List) Comma separated list of pools that can be used to persist share data. diff --git a/doc/source/configuration/tables/manila-vmax.inc b/doc/source/configuration/tables/manila-vmax.inc new file mode 100644 index 0000000000..a4dd796fc4 --- /dev/null +++ b/doc/source/configuration/tables/manila-vmax.inc @@ -0,0 +1,26 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. 
+ +.. _manila-vmax: + +.. list-table:: Description of Dell EMC VMAX share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``vmax_ethernet_ports`` = ``None`` + - (List) Comma separated list of ports that can be used for share server interfaces. Members of the list can be Unix-style glob expressions. + * - ``vmax_server_container`` = ``None`` + - (String) Data mover to host the NAS server. + * - ``vmax_share_data_pools`` = ``None`` + - (List) Comma separated list of pools that can be used to persist share data. diff --git a/doc/source/configuration/tables/manila-vnx.inc b/doc/source/configuration/tables/manila-vnx.inc new file mode 100644 index 0000000000..80dee8cce7 --- /dev/null +++ b/doc/source/configuration/tables/manila-vnx.inc @@ -0,0 +1,26 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-vnx: + +.. list-table:: Description of Dell EMC VNX share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``vnx_ethernet_ports`` = ``None`` + - (List) Comma separated list of ports that can be used for share server interfaces. Members of the list can be Unix-style glob expressions. + * - ``vnx_server_container`` = ``None`` + - (String) Data mover to host the NAS server. + * - ``vnx_share_data_pools`` = ``None`` + - (List) Comma separated list of pools that can be used to persist share data. 
diff --git a/doc/source/configuration/tables/manila-winrm.inc b/doc/source/configuration/tables/manila-winrm.inc new file mode 100644 index 0000000000..66622177e8 --- /dev/null +++ b/doc/source/configuration/tables/manila-winrm.inc @@ -0,0 +1,34 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-winrm: + +.. list-table:: Description of WinRM configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``winrm_cert_key_pem_path`` = ``~/.ssl/key.pem`` + - (String) Path to the x509 certificate key. + * - ``winrm_cert_pem_path`` = ``~/.ssl/cert.pem`` + - (String) Path to the x509 certificate used for accessing the service instance. + * - ``winrm_conn_timeout`` = ``60`` + - (Integer) WinRM connection timeout. + * - ``winrm_operation_timeout`` = ``60`` + - (Integer) WinRM operation timeout. + * - ``winrm_retry_count`` = ``3`` + - (Integer) WinRM retry count. + * - ``winrm_retry_interval`` = ``5`` + - (Integer) WinRM retry interval in seconds. + * - ``winrm_use_cert_based_auth`` = ``False`` + - (Boolean) Use x509 certificates in order to authenticate to the service instance. diff --git a/doc/source/configuration/tables/manila-zfs.inc b/doc/source/configuration/tables/manila-zfs.inc new file mode 100644 index 0000000000..48823b0205 --- /dev/null +++ b/doc/source/configuration/tables/manila-zfs.inc @@ -0,0 +1,46 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten.
+ + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-zfs: + +.. list-table:: Description of ZFS share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``zfs_dataset_creation_options`` = ``None`` + - (List) Define here list of options that should be applied for each dataset creation if needed. Example: compression=gzip,dedup=off. Note that, for secondary replicas option 'readonly' will be set to 'on' and for active replicas to 'off' in any way. Also, 'quota' will be equal to share size. Optional. + * - ``zfs_dataset_name_prefix`` = ``manila_share_`` + - (String) Prefix to be used in each dataset name. Optional. + * - ``zfs_dataset_snapshot_name_prefix`` = ``manila_share_snapshot_`` + - (String) Prefix to be used in each dataset snapshot name. Optional. + * - ``zfs_migration_snapshot_prefix`` = ``tmp_snapshot_for_share_migration_`` + - (String) Set snapshot prefix for usage in ZFS migration. Required. + * - ``zfs_replica_snapshot_prefix`` = ``tmp_snapshot_for_replication_`` + - (String) Set snapshot prefix for usage in ZFS replication. Required. + * - ``zfs_service_ip`` = ``None`` + - (String) IP to be added to admin-facing export location. Required. + * - ``zfs_share_export_ip`` = ``None`` + - (String) IP to be added to user-facing export location. Required. + * - ``zfs_share_helpers`` = ``NFS=manila.share.drivers.zfsonlinux.utils.NFSviaZFSHelper`` + - (List) Specify list of share export helpers for ZFS storage. It should look like following: 'FOO_protocol=foo.FooClass,BAR_protocol=bar.BarClass'. Required. 
+ * - ``zfs_ssh_private_key_path`` = ``None`` + - (String) Path to SSH private key that should be used for SSH'ing ZFS storage host. Not used for replication operations. Optional. + * - ``zfs_ssh_user_password`` = ``None`` + - (String) Password for user that is used for SSH'ing ZFS storage host. Not used for replication operations. They require passwordless SSH access. Optional. + * - ``zfs_ssh_username`` = ``None`` + - (String) SSH user that will be used in 2 cases: 1) By manila-share service in case it is located on different host than its ZFS storage. 2) By manila-share services with other ZFS backends that perform replication. It is expected that SSH'ing will be key-based, passwordless. This user should be passwordless sudoer. Optional. + * - ``zfs_use_ssh`` = ``False`` + - (Boolean) Remote ZFS storage hostname that should be used for SSH'ing. Optional. + * - ``zfs_zpool_list`` = ``None`` + - (List) Specify list of zpools that are allowed to be used by backend. Can contain nested datasets. Examples: Without nested dataset: 'zpool_name'. With nested dataset: 'zpool_name/nested_dataset_name'. Required. diff --git a/doc/source/configuration/tables/manila-zfssa.inc b/doc/source/configuration/tables/manila-zfssa.inc new file mode 100644 index 0000000000..1543b9cd96 --- /dev/null +++ b/doc/source/configuration/tables/manila-zfssa.inc @@ -0,0 +1,50 @@ +.. + Warning: Do not edit this file. It is automatically generated from the + software project's code and your changes will be overwritten. + + The tool to generate this file lives in openstack-doc-tools repository. + + Please make any changes needed in the code, then run the + autogenerate-config-doc tool from the openstack-doc-tools repository, or + ask for help on the documentation mailing list, IRC channel or meeting. + +.. _manila-zfssa: + +.. 
list-table:: Description of ZFSSA share driver configuration options + :header-rows: 1 + :class: config-ref-table + + * - Configuration option = Default value + - Description + * - **[DEFAULT]** + - + * - ``zfssa_auth_password`` = ``None`` + - (String) ZFSSA management authorized user password. + * - ``zfssa_auth_user`` = ``None`` + - (String) ZFSSA management authorized username. + * - ``zfssa_data_ip`` = ``None`` + - (String) IP address for data. + * - ``zfssa_host`` = ``None`` + - (String) ZFSSA management IP address. + * - ``zfssa_manage_policy`` = ``loose`` + - (String) Driver policy for share manage. A strict policy checks for a schema named manila_managed, and makes sure its value is true. A loose policy does not check for the schema. + * - ``zfssa_nas_checksum`` = ``fletcher4`` + - (String) Controls checksum used for data blocks. + * - ``zfssa_nas_compression`` = ``off`` + - (String) Data compression-off, lzjb, gzip-2, gzip, gzip-9. + * - ``zfssa_nas_logbias`` = ``latency`` + - (String) Controls behavior when servicing synchronous writes. + * - ``zfssa_nas_mountpoint`` = + - (String) Location of project in ZFS/SA. + * - ``zfssa_nas_quota_snap`` = ``true`` + - (String) Controls whether a share quota includes snapshot. + * - ``zfssa_nas_rstchown`` = ``true`` + - (String) Controls whether file ownership can be changed. + * - ``zfssa_nas_vscan`` = ``false`` + - (String) Controls whether the share is scanned for viruses. + * - ``zfssa_pool`` = ``None`` + - (String) ZFSSA storage pool name. + * - ``zfssa_project`` = ``None`` + - (String) ZFSSA project name. + * - ``zfssa_rest_timeout`` = ``None`` + - (String) REST connection timeout (in seconds). diff --git a/doc/source/index.rst b/doc/source/index.rst index 6f187bd40b..54210bce17 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -74,7 +74,7 @@ Configuration Reference .. toctree:: :maxdepth: 1 - configuration/index + configuration/shared-file-systems Other Reference ===============