Merge "Update cinder flags with autogenerate tools"

This commit is contained in:
Jenkins 2013-09-20 21:00:52 +00:00 committed by Gerrit Code Review
commit db82f69bea
23 changed files with 723 additions and 220 deletions

View File

@ -11,7 +11,7 @@
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
<td>Description</td>
</tr>
</thead>
<tbody>
@ -29,11 +29,11 @@
</tr>
<tr>
<td>enable_v1_api=True</td>
<td>(BoolOpt) Deploy v1 of the Cinder API.</td>
<td>(BoolOpt) Deploy v1 of the Cinder API. </td>
</tr>
<tr>
<td>enable_v2_api=True</td>
<td>(BoolOpt) Deploy v2 of the Cinder API.</td>
<td>(BoolOpt) Deploy v2 of the Cinder API. </td>
</tr>
<tr>
<td>osapi_max_limit=1000</td>
@ -89,4 +89,4 @@
</tr>
</tbody>
</table>
</para>
</para>

View File

@ -11,7 +11,7 @@
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
<td>Description</td>
</tr>
</thead>
<tbody>

View File

@ -11,7 +11,7 @@
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
<td>Description</td>
</tr>
</thead>
<tbody>

View File

@ -11,7 +11,7 @@
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
<td>Description</td>
</tr>
</thead>
<tbody>
@ -19,30 +19,6 @@
<td>backup_api_class=cinder.backup.api.API</td>
<td>(StrOpt) The full class name of the volume backup API class</td>
</tr>
<tr>
<td>backup_ceph_chunk_size=134217728</td>
<td>(IntOpt) the chunk size in bytes that a backup will be broken into before transfer to backup store</td>
</tr>
<tr>
<td>backup_ceph_conf=/etc/ceph/ceph.conf</td>
<td>(StrOpt) Ceph config file to use.</td>
</tr>
<tr>
<td>backup_ceph_pool=backups</td>
<td>(StrOpt) the Ceph pool to backup to</td>
</tr>
<tr>
<td>backup_ceph_stripe_count=0</td>
<td>(IntOpt) RBD stripe count to use when creating a backup image</td>
</tr>
<tr>
<td>backup_ceph_stripe_unit=0</td>
<td>(IntOpt) RBD stripe unit to use when creating a backup image</td>
</tr>
<tr>
<td>backup_ceph_user=cinder</td>
<td>(StrOpt) the Ceph user to connect with</td>
</tr>
<tr>
<td>backup_driver=cinder.backup.drivers.swift</td>
<td>(StrOpt) Driver to use for backups.</td>
@ -59,6 +35,10 @@
<td>backup_topic=cinder-backup</td>
<td>(StrOpt) the topic volume backup nodes listen on</td>
</tr>
<tr>
<td>restore_discard_excess_bytes=True</td>
<td>(BoolOpt) If True, always discard excess bytes when restoring volumes.</td>
</tr>
</tbody>
</table>
</para>

View File

@ -0,0 +1,44 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for backups_ceph</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>backup_ceph_chunk_size=134217728</td>
<td>(IntOpt) the chunk size in bytes that a backup will be broken into before transfer to backup store</td>
</tr>
<tr>
<td>backup_ceph_conf=/etc/ceph/ceph.conf</td>
<td>(StrOpt) Ceph config file to use.</td>
</tr>
<tr>
<td>backup_ceph_pool=backups</td>
<td>(StrOpt) the Ceph pool to backup to</td>
</tr>
<tr>
<td>backup_ceph_stripe_count=0</td>
<td>(IntOpt) RBD stripe count to use when creating a backup image</td>
</tr>
<tr>
<td>backup_ceph_stripe_unit=0</td>
<td>(IntOpt) RBD stripe unit to use when creating a backup image</td>
</tr>
<tr>
<td>backup_ceph_user=cinder</td>
<td>(StrOpt) the Ceph user to connect with</td>
</tr>
</tbody>
</table>
</para>

View File

@ -0,0 +1,56 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for backups_swift</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>backup_compression_algorithm=zlib</td>
<td>(StrOpt) Compression algorithm (None to disable)</td>
</tr>
<tr>
<td>backup_swift_auth=per_user</td>
<td>(StrOpt) Swift authentication mechanism</td>
</tr>
<tr>
<td>backup_swift_container=volumebackups</td>
<td>(StrOpt) The default Swift container to use</td>
</tr>
<tr>
<td>backup_swift_key=None</td>
<td>(StrOpt) Swift key for authentication</td>
</tr>
<tr>
<td>backup_swift_object_size=52428800</td>
<td>(IntOpt) The size in bytes of Swift backup objects</td>
</tr>
<tr>
<td>backup_swift_retry_attempts=3</td>
<td>(IntOpt) The number of retries to make for Swift operations</td>
</tr>
<tr>
<td>backup_swift_retry_backoff=2</td>
<td>(IntOpt) The backoff time in seconds between Swift retries</td>
</tr>
<tr>
<td>backup_swift_url=http://localhost:8080/v1/AUTH_</td>
<td>(StrOpt) The URL of the Swift endpoint</td>
</tr>
<tr>
<td>backup_swift_user=None</td>
<td>(StrOpt) Swift user name</td>
</tr>
</tbody>
</table>
</para>

View File

@ -0,0 +1,32 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for backups_tsm</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>backup_tsm_compression=True</td>
<td>(BoolOpt) Enable or Disable compression for backups</td>
</tr>
<tr>
<td>backup_tsm_password=password</td>
<td>(StrOpt) TSM password for the running username</td>
</tr>
<tr>
<td>backup_tsm_volume_prefix=backup</td>
<td>(StrOpt) Volume prefix for the backup id when backing up to TSM</td>
</tr>
</tbody>
</table>
</para>

View File

@ -11,14 +11,22 @@
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>compute_api_class=cinder.compute.nova.API</td>
<td>(StrOpt) The full class name of the compute API class to use</td>
</tr>
<tr>
<td>debug=False</td>
<td>(BoolOpt) Print debugging output (set logging level to DEBUG instead of default WARNING level).</td>
</tr>
<tr>
<td>default_availability_zone=None</td>
<td>(StrOpt) default availability zone to use when creating a new volume. If this is not set then we use the value from the storage_availability_zone option as the default availability_zone for new volumes.</td>
</tr>
<tr>
<td>default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN</td>
<td>(ListOpt) list of logger=LEVEL pairs</td>
@ -28,7 +36,7 @@
<td>(StrOpt) Default notification level for outgoing notifications</td>
</tr>
<tr>
<td>default_publisher_id=$host</td>
<td>default_publisher_id=None</td>
<td>(StrOpt) Default publisher_id for outgoing notifications</td>
</tr>
<tr>
@ -51,10 +59,6 @@
<td>fatal_exception_format_errors=False</td>
<td>(BoolOpt) make exception message format errors fatal</td>
</tr>
<tr>
<td>host=127.0.0.1</td>
<td>(StrOpt) Host to locate redis</td>
</tr>
<tr>
<td>host=autodoc</td>
<td>(StrOpt) Name of this node. This can be an opaque identifier. It is not necessarily a hostname, FQDN, or IP address.</td>
@ -67,6 +71,10 @@
<td>host=autodoc</td>
<td>(StrOpt) Name of this node. This can be an opaque identifier. It is not necessarily a hostname, FQDN, or IP address.</td>
</tr>
<tr>
<td>host=127.0.0.1</td>
<td>(StrOpt) Host to locate redis</td>
</tr>
<tr>
<td>idle_timeout=3600</td>
<td>(IntOpt) timeout before idle sql connections are reaped</td>
@ -121,14 +129,14 @@
</tr>
<tr>
<td>monkey_patch=False</td>
<td>(BoolOpt) Whether to log monkey patching</td>
<td>(BoolOpt) Enable monkey patching</td>
</tr>
<tr>
<td>monkey_patch_modules=</td>
<td>(ListOpt) List of modules/decorators to monkey patch</td>
</tr>
<tr>
<td>my_ip=198.61.167.113</td>
<td>my_ip=192.168.122.175</td>
<td>(StrOpt) ip address of this host</td>
</tr>
<tr>
@ -136,13 +144,37 @@
<td>(BoolOpt) Whether snapshots count against GigaByte quota</td>
</tr>
<tr>
<td>num_iscsi_scan_tries=3</td>
<td>(IntOpt) number of times to rescan iSCSI target to find volume</td>
<td>nova_api_insecure=False</td>
<td>(BoolOpt) Allow to perform insecure SSL requests to nova</td>
</tr>
<tr>
<td>nova_ca_certificates_file=None</td>
<td>(StrOpt) Location of ca certicates file to use for nova client requests.</td>
</tr>
<tr>
<td>nova_catalog_admin_info=compute:nova:adminURL</td>
<td>(StrOpt) Same as nova_catalog_info, but for admin endpoint.</td>
</tr>
<tr>
<td>nova_catalog_info=compute:nova:publicURL</td>
<td>(StrOpt) Info to match when looking for nova in the service catalog. Format is : separated values of the form: &lt;service_type&gt;:&lt;service_name&gt;:&lt;endpoint_type&gt;</td>
</tr>
<tr>
<td>nova_endpoint_admin_template=None</td>
<td>(StrOpt) Same as nova_endpoint_template, but for admin endpoint.</td>
</tr>
<tr>
<td>nova_endpoint_template=None</td>
<td>(StrOpt) Override service catalog lookup with template for nova endpoint e.g. http://localhost:8774/v2/%(tenant_id)s</td>
</tr>
<tr>
<td>num_shell_tries=3</td>
<td>(IntOpt) number of times to attempt to run flakey shell commands</td>
</tr>
<tr>
<td>os_region_name=None</td>
<td>(StrOpt) region name of this node</td>
</tr>
<tr>
<td>password=None</td>
<td>(StrOpt) Password for Redis server. (optional)</td>
@ -164,7 +196,7 @@
<td>(IntOpt) Use this port to connect to redis host.</td>
</tr>
<tr>
<td>pybasedir=/home/stacker/repos/cinder</td>
<td>pybasedir=/usr/lib/python2.7/site-packages</td>
<td>(StrOpt) Directory where the cinder python module is installed</td>
</tr>
<tr>
@ -200,7 +232,7 @@
<td>(StrOpt) Deprecated: command to use for running commands as root</td>
</tr>
<tr>
<td>rootwrap_config=None</td>
<td>rootwrap_config=/etc/cinder/rootwrap.conf</td>
<td>(StrOpt) Path to the rootwrap configuration file to use for running commands as root</td>
</tr>
<tr>
@ -261,7 +293,7 @@
</tr>
<tr>
<td>topics=notifications</td>
<td>(ListOpt) AMQP topic(s) used for openstack notifications</td>
<td>(ListOpt) AMQP topic(s) used for OpenStack notifications</td>
</tr>
<tr>
<td>until_refresh=0</td>

View File

@ -11,7 +11,7 @@
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
<td>Description</td>
</tr>
</thead>
<tbody>

View File

@ -11,7 +11,7 @@
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
<td>Description</td>
</tr>
</thead>
<tbody>

View File

@ -0,0 +1,40 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for gpfs_volume</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>gpfs_images_dir=None</td>
<td>(StrOpt) Path to GPFS Glance repository as mounted on Nova nodes</td>
</tr>
<tr>
<td>gpfs_images_share_mode=None</td>
<td>(StrOpt) Set this if Glance image repo is on GPFS as well so that the image bits can be transferred efficiently between Glance and Cinder. Valid values are copy or copy_on_write. copy performs a full copy of the image, copy_on_write efficiently shares unmodified blocks of the image.</td>
</tr>
<tr>
<td>gpfs_max_clone_depth=0</td>
<td>(IntOpt) A lengthy chain of copy-on-write snapshots or clones could have impact on performance. This option limits the number of indirections required to reach a specific block. 0 indicates unlimited.</td>
</tr>
<tr>
<td>gpfs_mount_point_base=None</td>
<td>(StrOpt) Path to the directory on GPFS mount point where volumes are stored</td>
</tr>
<tr>
<td>gpfs_sparse_volumes=True</td>
<td>(BoolOpt) Create volumes as sparse files which take no space. If set to False volume is created as regular file. In this case volume creation may take a significantly longer time.</td>
</tr>
</tbody>
</table>
</para>

View File

@ -11,10 +11,14 @@
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>allowed_direct_url_schemes=</td>
<td>(ListOpt) A list of url schemes that can be downloaded directly via the direct_url. Currently supported schemes: [file].</td>
</tr>
<tr>
<td>glance_api_insecure=False</td>
<td>(BoolOpt) Allow to perform insecure SSL (https) requests to glance</td>
@ -44,35 +48,19 @@
<td>(IntOpt) default glance port</td>
</tr>
<tr>
<td>gpfs_images_dir=None</td>
<td>(StrOpt) Path to GPFS Glance repository as mounted on Nova nodes</td>
<td>glance_request_timeout=None</td>
<td>(IntOpt) http/https timeout value for glance operations. If no value (None) is supplied here, the glanceclient default value is used.</td>
</tr>
<tr>
<td>gpfs_images_share_mode=None</td>
<td>(StrOpt) Set this if Glance image repo is on GPFS as well so that the image bits can be transferred efficiently between Glance and Cinder. Valid values are copy or copy_on_write. copy performs a full copy of the image, copy_on_write efficiently shares unmodified blocks of the image.</td>
<td>image_conversion_dir=$state_path/conversion</td>
<td>(StrOpt) Directory used for temporary storage during image conversion</td>
</tr>
<tr>
<td>gpfs_max_clone_depth=0</td>
<td>(IntOpt) A lengthy chain of copy-on-write snapshots or clones could have impact on performance. This option limits the number of indirections required to reach a specific block. 0 indicates unlimited.</td>
</tr>
<tr>
<td>gpfs_mount_point_base=None</td>
<td>(StrOpt) Path to the directory on GPFS mount point where volumes are stored</td>
</tr>
<tr>
<td>gpfs_sparse_volumes=True</td>
<td>(BoolOpt) Create volumes as sparse files which take no space. If set to False volume is created as regular file. In this case volume creation may take a significantly longer time.</td>
</tr>
<tr>
<td>image_conversion_dir=/tmp</td>
<td>(StrOpt) parent dir for tempdir used for image conversion</td>
</tr>
<tr>
<td>instance_format=[instance: %(uuid)s]</td>
<td>instance_format=[instance: %(uuid)s] </td>
<td>(StrOpt) If an instance is passed with the log message, format it like this</td>
</tr>
<tr>
<td>instance_uuid_format=[instance: %(uuid)s]</td>
<td>instance_uuid_format=[instance: %(uuid)s] </td>
<td>(StrOpt) If an instance UUID is passed with the log message, format it like this</td>
</tr>
<tr>
@ -81,4 +69,4 @@
</tr>
</tbody>
</table>
</para>
</para>

View File

@ -11,7 +11,7 @@
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
<td>Description</td>
</tr>
</thead>
<tbody>

View File

@ -11,7 +11,7 @@
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
<td>Description</td>
</tr>
</thead>
<tbody>
@ -19,6 +19,14 @@
<td>allowed_rpc_exception_modules=cinder.openstack.common.exception,nova.exception,cinder.exception,exceptions</td>
<td>(ListOpt) Modules of exceptions that are permitted to be recreated upon receiving exception data from an rpc call.</td>
</tr>
<tr>
<td>amqp_auto_delete=False</td>
<td>(BoolOpt) Auto-delete queues in amqp.</td>
</tr>
<tr>
<td>amqp_durable_queues=False</td>
<td>(BoolOpt) Use durable queues in amqp.</td>
</tr>
<tr>
<td>amqp_rpc_single_reply_queue=False</td>
<td>(BoolOpt) Enable a fast single reply queue if using AMQP based RPC like RabbitMQ or Qpid.</td>
@ -65,7 +73,7 @@
</tr>
<tr>
<td>notification_topics=notifications</td>
<td>(ListOpt) AMQP topic used for openstack notifications</td>
<td>(ListOpt) AMQP topic used for OpenStack notifications</td>
</tr>
<tr>
<td>publish_errors=False</td>
@ -104,12 +112,12 @@
<td>(BoolOpt) Disable Nagle algorithm</td>
</tr>
<tr>
<td>qpid_username=</td>
<td>(StrOpt) Username for qpid connection</td>
<td>qpid_topology_version=1</td>
<td>(IntOpt) The qpid topology version to use. Version 1 is what was originally used by impl_qpid. Version 2 includes some backwards-incompatible changes that allow broker federation to work. Users should update to version 2 when they are able to take everything down, as it requires a clean break.</td>
</tr>
<tr>
<td>rabbit_durable_queues=False</td>
<td>(BoolOpt) use durable queues in RabbitMQ</td>
<td>qpid_username=</td>
<td>(StrOpt) Username for qpid connection</td>
</tr>
<tr>
<td>rabbit_ha_queues=False</td>

View File

@ -11,7 +11,7 @@
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
<td>Description</td>
</tr>
</thead>
<tbody>

View File

@ -11,7 +11,7 @@
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
<td>Description</td>
</tr>
</thead>
<tbody>
@ -60,25 +60,81 @@
<td>(ListOpt) A list of backend names to use. These backend names should be backed by a unique [CONFIG] group with its options</td>
</tr>
<tr>
<td>glusterfs_disk_util=df</td>
<td>(StrOpt) Use du or df for free space calculation</td>
<td>eqlx_chap_login=admin</td>
<td>(StrOpt) Existing CHAP account name</td>
</tr>
<tr>
<td>glusterfs_mount_point_base=$state_path/mnt</td>
<td>(StrOpt) Base dir containing mount points for gluster shares</td>
<td>eqlx_chap_password=password</td>
<td>(StrOpt) Password for specified CHAP account name</td>
</tr>
<tr>
<td>glusterfs_shares_config=/etc/cinder/glusterfs_shares</td>
<td>(StrOpt) File with the list of available gluster shares</td>
<td>eqlx_cli_max_retries=5</td>
<td>(IntOpt) Maximum retry count for reconnection</td>
</tr>
<tr>
<td>glusterfs_sparsed_volumes=True</td>
<td>(BoolOpt) Create volumes as sparsed files which take no space. If set to False volume is created as regular file. In such case volume creation takes a lot of time.</td>
<td>eqlx_cli_timeout=30</td>
<td>(IntOpt) Timeout for the Group Manager cli command execution</td>
</tr>
<tr>
<td>eqlx_group_name=group-0</td>
<td>(StrOpt) Group name to use for creating volumes</td>
</tr>
<tr>
<td>eqlx_pool=default</td>
<td>(StrOpt) Pool in which volumes will be created</td>
</tr>
<tr>
<td>eqlx_use_chap=False</td>
<td>(BoolOpt) Use CHAP authentication for targets?</td>
</tr>
<tr>
<td>expiry_thres_minutes=720</td>
<td>(IntOpt) Threshold minutes after which cache file can be cleaned.</td>
</tr>
<tr>
<td>hds_cinder_config_file=/opt/hds/hus/cinder_hus_conf.xml</td>
<td>(StrOpt) configuration file for HDS cinder plugin for HUS</td>
</tr>
<tr>
<td>hp3par_api_url=</td>
<td>(StrOpt) 3PAR WSAPI Server Url like https://&lt;3par ip&gt;:8080/api/v1</td>
</tr>
<tr>
<td>hp3par_cpg=OpenStack</td>
<td>(StrOpt) The CPG to use for volume creation</td>
</tr>
<tr>
<td>hp3par_cpg_snap=</td>
<td>(StrOpt) The CPG to use for Snapshots for volumes. If empty hp3par_cpg will be used</td>
</tr>
<tr>
<td>hp3par_debug=False</td>
<td>(BoolOpt) Enable HTTP debugging to 3PAR</td>
</tr>
<tr>
<td>hp3par_domain=None</td>
<td>(StrOpt) This option is DEPRECATED and no longer used. The 3par domain name to use.</td>
</tr>
<tr>
<td>hp3par_iscsi_ips=</td>
<td>(ListOpt) List of target iSCSI addresses to use.</td>
</tr>
<tr>
<td>hp3par_password=</td>
<td>(StrOpt) 3PAR Super user password</td>
</tr>
<tr>
<td>hp3par_snapshot_expiration=</td>
<td>(StrOpt) The time in hours when a snapshot expires and is deleted. This must be larger than expiration</td>
</tr>
<tr>
<td>hp3par_snapshot_retention=</td>
<td>(StrOpt) The time in hours to retain a snapshot. You can't delete it before this expires.</td>
</tr>
<tr>
<td>hp3par_username=</td>
<td>(StrOpt) 3PAR Super user username</td>
</tr>
<tr>
<td>iscsi_helper=tgtadm</td>
<td>(StrOpt) iscsi target user-land tool to use</td>
@ -89,11 +145,11 @@
</tr>
<tr>
<td>iscsi_ip_address=$my_ip</td>
<td>(StrOpt) The port that the iSCSI daemon is listening on</td>
<td>(StrOpt) The IP address that the iSCSI daemon is listening on</td>
</tr>
<tr>
<td>iscsi_num_targets=100</td>
<td>(IntOpt) Number of iscsi target ids per host</td>
<td>(IntOpt) The maximum number of iscsi target ids per host</td>
</tr>
<tr>
<td>iscsi_port=3260</td>
@ -103,10 +159,38 @@
<td>iscsi_target_prefix=iqn.2010-10.org.openstack:</td>
<td>(StrOpt) prefix for iscsi volumes</td>
</tr>
<tr>
<td>iser_helper=tgtadm</td>
<td>(StrOpt) iser target user-land tool to use</td>
</tr>
<tr>
<td>iser_ip_address=$my_ip</td>
<td>(StrOpt) The IP address that the iSER daemon is listening on</td>
</tr>
<tr>
<td>iser_num_targets=100</td>
<td>(IntOpt) The maximum number of iser target ids per host</td>
</tr>
<tr>
<td>iser_port=3260</td>
<td>(IntOpt) The port that the iSER daemon is listening on</td>
</tr>
<tr>
<td>iser_target_prefix=iqn.2010-10.org.iser.openstack:</td>
<td>(StrOpt) prefix for iser volumes</td>
</tr>
<tr>
<td>keymgr_api_class=cinder.keymgr.not_implemented_key_mgr.NotImplementedKeyManager</td>
<td>(StrOpt) The full class name of the key manager API class</td>
</tr>
<tr>
<td>lvm_mirrors=0</td>
<td>(IntOpt) If set, create lvms with multiple mirrors. Note that this requires lvm_mirrors + 2 pvs with available space</td>
</tr>
<tr>
<td>lvm_type=default</td>
<td>(StrOpt) Type of LVM volumes to deploy; (default or thin)</td>
</tr>
<tr>
<td>max_age=0</td>
<td>(IntOpt) number of seconds between subsequent usage refreshes</td>
@ -139,10 +223,6 @@
<td>min_pool_size=1</td>
<td>(IntOpt) Minimum number of SQL connections to keep open in a pool</td>
</tr>
<tr>
<td>netapp_login=None</td>
<td>(StrOpt) User name for the storage controller</td>
</tr>
<tr>
<td>netapp_password=None</td>
<td>(StrOpt) Password for the storage controller</td>
@ -171,85 +251,21 @@
<td>netapp_transport_type=http</td>
<td>(StrOpt) Transport type protocol</td>
</tr>
<tr>
<td>netapp_vfiler=None</td>
<td>(StrOpt) Vfiler to use for provisioning</td>
</tr>
<tr>
<td>netapp_volume_list=None</td>
<td>(StrOpt) Comma separated volumes to be used for provisioning</td>
</tr>
<tr>
<td>netapp_vserver=openstack</td>
<td>netapp_vserver=None</td>
<td>(StrOpt) Cluster vserver to use for provisioning</td>
</tr>
<tr>
<td>nexenta_blocksize=</td>
<td>(StrOpt) block size for volumes (blank=default,8KB)</td>
<td>num_iser_scan_tries=3</td>
<td>(IntOpt) The maximum number of times to rescan iSER target to find volume</td>
</tr>
<tr>
<td>nexenta_host=</td>
<td>(StrOpt) IP address of Nexenta SA</td>
</tr>
<tr>
<td>nexenta_iscsi_target_portal_port=3260</td>
<td>(IntOpt) Nexenta target portal port</td>
</tr>
<tr>
<td>nexenta_password=nexenta</td>
<td>(StrOpt) Password to connect to Nexenta SA</td>
</tr>
<tr>
<td>nexenta_rest_port=2000</td>
<td>(IntOpt) HTTP port to connect to Nexenta REST API server</td>
</tr>
<tr>
<td>nexenta_rest_protocol=auto</td>
<td>(StrOpt) Use http or https for REST connection (default auto)</td>
</tr>
<tr>
<td>nexenta_sparse=False</td>
<td>(BoolOpt) flag to create sparse volumes</td>
</tr>
<tr>
<td>nexenta_target_group_prefix=cinder/</td>
<td>(StrOpt) prefix for iSCSI target groups on SA</td>
</tr>
<tr>
<td>nexenta_target_prefix=iqn.1986-03.com.sun:02:cinder-</td>
<td>(StrOpt) IQN prefix for iSCSI targets</td>
</tr>
<tr>
<td>nexenta_user=admin</td>
<td>(StrOpt) User name to connect to Nexenta SA</td>
</tr>
<tr>
<td>nexenta_volume=cinder</td>
<td>(StrOpt) pool on SA that will hold all volumes</td>
</tr>
<tr>
<td>nfs_mount_options=None</td>
<td>(StrOpt) Mount options passed to the nfs client. See section of the nfs man page for details</td>
</tr>
<tr>
<td>nfs_mount_point_base=$state_path/mnt</td>
<td>(StrOpt) Base dir containing mount points for nfs shares</td>
</tr>
<tr>
<td>nfs_oversub_ratio=1.0</td>
<td>(FloatOpt) This will compare the allocated to available space on the volume destination. If the ratio exceeds this number, the destination will no longer be valid.</td>
</tr>
<tr>
<td>nfs_shares_config=/etc/cinder/nfs_shares</td>
<td>(StrOpt) File with the list of available nfs shares</td>
</tr>
<tr>
<td>nfs_sparsed_volumes=True</td>
<td>(BoolOpt) Create volumes as sparsed files which take no space. If set to False volume is created as regular file. In such case volume creation takes a lot of time.</td>
</tr>
<tr>
<td>nfs_used_ratio=0.95</td>
<td>(FloatOpt) Percent of ACTUAL usage of the underlying volume before no new volumes can be allocated to the volume destination.</td>
<td>num_volume_device_scan_tries=3</td>
<td>(IntOpt) The maximum number of times to rescan targets to find volume</td>
</tr>
<tr>
<td>rbd_ceph_conf=</td>
@ -259,6 +275,10 @@
<td>rbd_flatten_volume_from_snapshot=False</td>
<td>(BoolOpt) flatten volumes created from snapshots to remove dependency</td>
</tr>
<tr>
<td>rbd_max_clone_depth=5</td>
<td>(IntOpt) maximum number of nested clones that can be taken of a volume before enforcing a flatten prior to next clone. A value of zero disables cloning</td>
</tr>
<tr>
<td>rbd_pool=rbd</td>
<td>(StrOpt) the RADOS pool in which rbd volumes are stored</td>
@ -327,6 +347,10 @@
<td>sf_allow_tenant_qos=False</td>
<td>(BoolOpt) Allow tenants to specify QOS on create</td>
</tr>
<tr>
<td>sf_api_port=443</td>
<td>(IntOpt) SolidFire API port. Useful if the device api is behind a proxy on a different port.</td>
</tr>
<tr>
<td>sf_emulate_512=True</td>
<td>(BoolOpt) Set 512 byte emulation on volume creation; </td>
@ -337,7 +361,7 @@
</tr>
<tr>
<td>storwize_svc_flashcopy_timeout=120</td>
<td>(IntOpt) Maximum number of seconds to wait for FlashCopy to be prepared. Maximum value is 600 seconds (10 minutes).</td>
<td>(IntOpt) Maximum number of seconds to wait for FlashCopy to be prepared. Maximum value is 600 seconds (10 minutes)</td>
</tr>
<tr>
<td>storwize_svc_multihostmap_enabled=True</td>
@ -345,7 +369,7 @@
</tr>
<tr>
<td>storwize_svc_multipath_enabled=False</td>
<td>(BoolOpt) Connect with multipath (currently FC-only)</td>
<td>(BoolOpt) Connect with multipath (FC only; iSCSI multipath is controlled by Nova)</td>
</tr>
<tr>
<td>storwize_svc_vol_autoexpand=True</td>
@ -363,6 +387,10 @@
<td>storwize_svc_vol_grainsize=256</td>
<td>(IntOpt) Storage system grain size parameter for volumes (32/64/128/256)</td>
</tr>
<tr>
<td>storwize_svc_vol_iogrp=0</td>
<td>(IntOpt) The I/O group in which to allocate volumes</td>
</tr>
<tr>
<td>storwize_svc_vol_rsize=2</td>
<td>(IntOpt) Storage system space-efficiency parameter for volumes (percentage)</td>
@ -375,13 +403,21 @@
<td>storwize_svc_volpool_name=volpool</td>
<td>(StrOpt) Storage system storage pool for volumes</td>
</tr>
<tr>
<td>thres_avl_size_perc_start=20</td>
<td>(IntOpt) Threshold available percent to start cache cleaning.</td>
</tr>
<tr>
<td>thres_avl_size_perc_stop=60</td>
<td>(IntOpt) Threshold available percent to stop cache cleaning.</td>
</tr>
<tr>
<td>volume_backend_name=None</td>
<td>(StrOpt) The backend name for a given driver implementation</td>
</tr>
<tr>
<td>volume_clear=zero</td>
<td>(StrOpt) Method used to wipe old volumes (valid options are: none, zero, shred)</td>
<td>(StrOpt) Method used to wipe old volumes (valid options are: none, zero, shred)</td>
</tr>
<tr>
<td>volume_clear_size=0</td>
@ -436,16 +472,12 @@
<td>(StrOpt) Path to store VHD backed volumes</td>
</tr>
<tr>
<td>xiv_proxy=xiv_openstack.nova_proxy.XIVNovaProxy</td>
<td>(StrOpt) Proxy driver</td>
<td>xiv_ds8k_connection_type=iscsi</td>
<td>(StrOpt) Connection type to the IBM Storage Array (fibre_channel|iscsi)</td>
</tr>
<tr>
<td>zadara_default_cache_policy=write-through</td>
<td>(StrOpt) Default cache policy for volumes</td>
</tr>
<tr>
<td>zadara_default_encryption=NO</td>
<td>(StrOpt) Default encryption policy for volumes</td>
<td>xiv_ds8k_proxy=xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy</td>
<td>(StrOpt) Proxy driver that connects to the IBM Storage Array</td>
</tr>
<tr>
<td>zadara_default_stripesize=64</td>
@ -463,10 +495,18 @@
<td>zadara_user=None</td>
<td>(StrOpt) User name for the VPSA</td>
</tr>
<tr>
<td>zadara_vol_encrypt=False</td>
<td>(BoolOpt) Default encryption policy for volumes</td>
</tr>
<tr>
<td>zadara_vol_name_template=OS_%s</td>
<td>(StrOpt) Default template for VPSA volume names</td>
</tr>
<tr>
<td>zadara_vol_thin=True</td>
<td>(BoolOpt) Default thin provisioning policy for volumes</td>
</tr>
<tr>
<td>zadara_vpsa_allow_nonexistent_delete=True</td>
<td>(BoolOpt) Don't halt on deletion of non-existing volumes</td>

View File

@ -0,0 +1,40 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for storage_glusterfs</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>glusterfs_disk_util=df</td>
<td>(StrOpt) Use du or df for free space calculation</td>
</tr>
<tr>
<td>glusterfs_mount_point_base=$state_path/mnt</td>
<td>(StrOpt) Base dir containing mount points for gluster shares</td>
</tr>
<tr>
<td>glusterfs_qcow2_volumes=False</td>
<td>(BoolOpt) Create volumes as QCOW2 files rather than raw files.</td>
</tr>
<tr>
<td>glusterfs_shares_config=/etc/cinder/glusterfs_shares</td>
<td>(StrOpt) File with the list of available gluster shares</td>
</tr>
<tr>
<td>glusterfs_sparsed_volumes=True</td>
<td>(BoolOpt) Create volumes as sparsed files which take no space. If set to False volume is created as regular file. In such case volume creation takes a lot of time.</td>
</tr>
</tbody>
</table>
</para>

View File

@ -0,0 +1,28 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for storage_netapp</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>netapp_login=None</td>
<td>(StrOpt) User name for the storage controller</td>
</tr>
<tr>
<td>netapp_vfiler=None</td>
<td>(StrOpt) Vfiler to use for provisioning</td>
</tr>
</tbody>
</table>
</para>

View File

@ -0,0 +1,64 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for storage_nexenta_iscsi</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>nexenta_blocksize=</td>
<td>(StrOpt) block size for volumes (blank=default,8KB)</td>
</tr>
<tr>
<td>nexenta_host=</td>
<td>(StrOpt) IP address of Nexenta SA</td>
</tr>
<tr>
<td>nexenta_iscsi_target_portal_port=3260</td>
<td>(IntOpt) Nexenta target portal port</td>
</tr>
<tr>
<td>nexenta_password=nexenta</td>
<td>(StrOpt) Password to connect to Nexenta SA</td>
</tr>
<tr>
<td>nexenta_rest_port=2000</td>
<td>(IntOpt) HTTP port to connect to Nexenta REST API server</td>
</tr>
<tr>
<td>nexenta_rest_protocol=auto</td>
<td>(StrOpt) Use http or https for REST connection (default auto)</td>
</tr>
<tr>
<td>nexenta_sparse=False</td>
<td>(BoolOpt) flag to create sparse volumes</td>
</tr>
<tr>
<td>nexenta_target_group_prefix=cinder/</td>
<td>(StrOpt) prefix for iSCSI target groups on SA</td>
</tr>
<tr>
<td>nexenta_target_prefix=iqn.1986-03.com.sun:02:cinder-</td>
<td>(StrOpt) IQN prefix for iSCSI targets</td>
</tr>
<tr>
<td>nexenta_user=admin</td>
<td>(StrOpt) User name to connect to Nexenta SA</td>
</tr>
<tr>
<td>nexenta_volume=cinder</td>
<td>(StrOpt) pool on SA that will hold all volumes</td>
</tr>
</tbody>
</table>
</para>

View File

@ -0,0 +1,48 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for storage_nexenta_nfs</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>nexenta_mount_options=None</td>
<td>(StrOpt) Mount options passed to the nfs client. See section of the nfs man page for details</td>
</tr>
<tr>
<td>nexenta_mount_point_base=$state_path/mnt</td>
<td>(StrOpt) Base dir containing mount points for nfs shares</td>
</tr>
<tr>
<td>nexenta_oversub_ratio=1.0</td>
<td>(FloatOpt) This will compare the allocated to available space on the volume destination. If the ratio exceeds this number, the destination will no longer be valid.</td>
</tr>
<tr>
<td>nexenta_shares_config=/etc/cinder/nfs_shares</td>
<td>(StrOpt) File with the list of available nfs shares</td>
</tr>
<tr>
<td>nexenta_sparsed_volumes=True</td>
<td>(BoolOpt) Create volumes as sparsed files which take no space. If set to False, the volume is created as a regular file. In such a case, volume creation takes a lot of time.</td>
</tr>
<tr>
<td>nexenta_used_ratio=0.95</td>
<td>(FloatOpt) Percent of ACTUAL usage of the underlying volume before no new volumes can be allocated to the volume destination.</td>
</tr>
<tr>
<td>nexenta_volume_compression=on</td>
<td>(StrOpt) Default compression value for new ZFS folders.</td>
</tr>
</tbody>
</table>
</para>

View File

@ -0,0 +1,44 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for storage_nfs</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>nfs_mount_options=None</td>
<td>(StrOpt) Mount options passed to the nfs client. See section of the nfs man page for details</td>
</tr>
<tr>
<td>nfs_mount_point_base=$state_path/mnt</td>
<td>(StrOpt) Base dir containing mount points for nfs shares</td>
</tr>
<tr>
<td>nfs_oversub_ratio=1.0</td>
<td>(FloatOpt) This will compare the allocated to available space on the volume destination. If the ratio exceeds this number, the destination will no longer be valid.</td>
</tr>
<tr>
<td>nfs_shares_config=/etc/cinder/nfs_shares</td>
<td>(StrOpt) File with the list of available nfs shares</td>
</tr>
<tr>
<td>nfs_sparsed_volumes=True</td>
<td>(BoolOpt) Create volumes as sparsed files which take no space. If set to False, the volume is created as a regular file. In such a case, volume creation takes a lot of time.</td>
</tr>
<tr>
<td>nfs_used_ratio=0.95</td>
<td>(FloatOpt) Percent of ACTUAL usage of the underlying volume before no new volumes can be allocated to the volume destination.</td>
</tr>
</tbody>
</table>
</para>

View File

@ -11,7 +11,7 @@
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
<td>Description</td>
</tr>
</thead>
<tbody>

View File

@ -1,4 +1,7 @@
allowed_direct_url_schemes images
allowed_rpc_exception_modules rpc
amqp_auto_delete rpc
amqp_durable_queues rpc
amqp_rpc_single_reply_queue rpc
api_paste_config api
api_rate_limit api
@ -8,20 +11,33 @@ backdoor_port api
backend storage
backlog log
backup_api_class backups
backup_ceph_chunk_size backups
backup_ceph_conf backups
backup_ceph_pool backups
backup_ceph_stripe_count backups
backup_ceph_stripe_unit backups
backup_ceph_user backups
backup_ceph_chunk_size backups_ceph
backup_ceph_conf backups_ceph
backup_ceph_pool backups_ceph
backup_ceph_stripe_count backups_ceph
backup_ceph_stripe_unit backups_ceph
backup_ceph_user backups_ceph
backup_compression_algorithm backups_swift
backup_driver backups
backup_manager backups
backup_name_template backups
backup_swift_auth backups_swift
backup_swift_container backups_swift
backup_swift_key backups_swift
backup_swift_object_size backups_swift
backup_swift_retry_attempts backups_swift
backup_swift_retry_backoff backups_swift
backup_swift_url backups_swift
backup_swift_user backups_swift
backup_topic backups
backup_tsm_compression backups_tsm
backup_tsm_password backups_tsm
backup_tsm_volume_prefix backups_tsm
bindir storage
capacity_weight_multiplier storage
cinder_huawei_conf_file storage
cloned_volume_same_az zones
compute_api_class common
connection connection
connection_debug connection
connection_trace connection
@ -35,6 +51,7 @@ coraid_user storage
db_backend database
db_driver database
debug common
default_availability_zone common
default_log_levels common
default_notification_level common
default_publisher_id common
@ -44,6 +61,14 @@ enable_new_services common
enable_v1_api api
enable_v2_api api
enabled_backends storage
eqlx_chap_login storage
eqlx_chap_password storage
eqlx_cli_max_retries storage
eqlx_cli_timeout storage
eqlx_group_name storage
eqlx_pool storage
eqlx_use_chap storage
expiry_thres_minutes storage
fake_rabbit rpc
fatal_deprecations common
fatal_exception_format_errors common
@ -54,18 +79,30 @@ glance_api_version images
glance_host images
glance_num_retries images
glance_port images
glusterfs_disk_util storage
glusterfs_mount_point_base storage
glusterfs_shares_config storage
glusterfs_sparsed_volumes storage
gpfs_images_dir images
gpfs_images_share_mode images
gpfs_max_clone_depth images
gpfs_mount_point_base images
gpfs_sparse_volumes images
glance_request_timeout images
glusterfs_disk_util storage_glusterfs
glusterfs_mount_point_base storage_glusterfs
glusterfs_qcow2_volumes storage_glusterfs
glusterfs_shares_config storage_glusterfs
glusterfs_sparsed_volumes storage_glusterfs
gpfs_images_dir gpfs_volume
gpfs_images_share_mode gpfs_volume
gpfs_max_clone_depth gpfs_volume
gpfs_mount_point_base gpfs_volume
gpfs_sparse_volumes gpfs_volume
hds_cinder_config_file storage
host common
host common
hp3par_api_url storage
hp3par_cpg storage
hp3par_cpg_snap storage
hp3par_debug storage
hp3par_domain storage
hp3par_iscsi_ips storage
hp3par_password storage
hp3par_snapshot_expiration storage
hp3par_snapshot_retention storage
hp3par_username storage
idle_timeout common
iet_conf common
image_conversion_dir images
@ -77,17 +114,17 @@ iscsi_ip_address storage
iscsi_num_targets storage
iscsi_port storage
iscsi_target_prefix storage
iser_target_prefix Unknown
iser_helper storage
iser_ip_address storage
iser_num_targets storage
iser_port storage
iser_target_prefix storage
keymgr_api_class storage
kombu_ssl_ca_certs rpc
iser_port Unknown
kombu_ssl_certfile rpc
iser_num_targets Unknown
kombu_ssl_keyfile rpc
iser_ip_address Unknown
kombu_ssl_version rpc
keymgr_api_class Unknown
lio_initiator_iqns common
iser_helper Unknown
lock_path common
log_config common
log_date_format common
@ -99,6 +136,7 @@ logging_debug_format_suffix common
logging_default_format_string common
logging_exception_prefix common
lvm_mirrors storage
lvm_type storage
matchmaker_heartbeat_freq rpc
matchmaker_heartbeat_ttl rpc
matchmaker_ringfile rpc
@ -113,7 +151,7 @@ min_pool_size storage
monkey_patch common
monkey_patch_modules common
my_ip common
netapp_login storage
netapp_login storage_netapp
netapp_password storage
netapp_server_hostname storage
netapp_server_port storage
@ -121,32 +159,46 @@ netapp_size_multiplier storage
netapp_storage_family storage
netapp_storage_protocol storage
netapp_transport_type storage
netapp_vfiler storage
netapp_vfiler storage_netapp
netapp_volume_list storage
netapp_vserver storage
nexenta_blocksize storage
nexenta_host storage
nexenta_iscsi_target_portal_port storage
nexenta_password storage
nexenta_rest_port storage
nexenta_rest_protocol storage
nexenta_sparse storage
nexenta_target_group_prefix storage
nexenta_target_prefix storage
nexenta_user storage
nexenta_volume storage
nfs_mount_options storage
nfs_mount_point_base storage
nfs_oversub_ratio storage
nfs_shares_config storage
nfs_sparsed_volumes storage
nfs_used_ratio storage
nexenta_blocksize storage_nexenta_iscsi
nexenta_host storage_nexenta_iscsi
nexenta_iscsi_target_portal_port storage_nexenta_iscsi
nexenta_mount_options storage_nexenta_nfs
nexenta_mount_point_base storage_nexenta_nfs
nexenta_oversub_ratio storage_nexenta_nfs
nexenta_password storage_nexenta_iscsi
nexenta_rest_port storage_nexenta_iscsi
nexenta_rest_protocol storage_nexenta_iscsi
nexenta_shares_config storage_nexenta_nfs
nexenta_sparse storage_nexenta_iscsi
nexenta_sparsed_volumes storage_nexenta_nfs
nexenta_target_group_prefix storage_nexenta_iscsi
nexenta_target_prefix storage_nexenta_iscsi
nexenta_used_ratio storage_nexenta_nfs
nexenta_user storage_nexenta_iscsi
nexenta_volume storage_nexenta_iscsi
nexenta_volume_compression storage_nexenta_nfs
nfs_mount_options storage_nfs
nfs_mount_point_base storage_nfs
nfs_oversub_ratio storage_nfs
nfs_shares_config storage_nfs
nfs_sparsed_volumes storage_nfs
nfs_used_ratio storage_nfs
no_snapshot_gb_quota common
notification_driver rpc
notification_topics rpc
num_iscsi_scan_tries common
num_iser_scan_tries Unknown
nova_api_insecure common
nova_ca_certificates_file common
nova_catalog_admin_info common
nova_catalog_info common
nova_endpoint_admin_template common
nova_endpoint_template common
num_iser_scan_tries storage
num_shell_tries common
num_volume_device_scan_tries storage
os_region_name common
osapi_max_limit api
osapi_max_request_body_size api
osapi_volume_base_URL api
@ -167,12 +219,12 @@ qpid_port rpc
qpid_protocol rpc
qpid_sasl_mechanisms rpc
qpid_tcp_nodelay rpc
qpid_topology_version rpc
qpid_username rpc
quota_driver common
quota_gigabytes common
quota_snapshots common
quota_volumes common
rabbit_durable_queues rpc
rabbit_ha_queues rpc
rabbit_host rpc
rabbit_hosts rpc
@ -186,11 +238,13 @@ rabbit_userid rpc
rabbit_virtual_host rpc
rbd_ceph_conf storage
rbd_flatten_volume_from_snapshot storage
rbd_max_clone_depth storage
rbd_pool storage
rbd_secret_uuid storage
rbd_user storage
reservation_expire common
reserved_percentage common
restore_discard_excess_bytes backups
retry_interval common
root_helper common
rootwrap_config common
@ -230,6 +284,7 @@ scheduler_topic scheduler
service_down_time common
sf_account_prefix storage
sf_allow_tenant_qos storage
sf_api_port storage
sf_emulate_512 storage
snapshot_name_template backup
snapshot_same_host backup
@ -251,11 +306,14 @@ storwize_svc_vol_autoexpand storage
storwize_svc_vol_compression storage
storwize_svc_vol_easytier storage
storwize_svc_vol_grainsize storage
storwize_svc_vol_iogrp storage
storwize_svc_vol_rsize storage
storwize_svc_vol_warning storage
storwize_svc_volpool_name storage
syslog_log_facility common
tcp_keepidle common
thres_avl_size_perc_start storage
thres_avl_size_perc_stop storage
topics common
transfer_api_class api
until_refresh common
@ -288,14 +346,15 @@ xenapi_connection_username api
xenapi_nfs_server api
xenapi_nfs_serverpath api
xenapi_sr_base_path api
xiv_proxy storage
zadara_default_cache_policy storage
zadara_default_encryption storage
xiv_ds8k_connection_type storage
xiv_ds8k_proxy storage
zadara_default_stripesize storage
zadara_default_striping_mode storage
zadara_password storage
zadara_user storage
zadara_vol_encrypt storage
zadara_vol_name_template storage
zadara_vol_thin storage
zadara_vpsa_allow_nonexistent_delete storage
zadara_vpsa_auto_detach_on_delete storage
zadara_vpsa_ip storage