Add Autogenerate-config-doc code to tools

This patch brings the auto-generate-config-doc tool into
the openstack-manuals repo, where it can be used to update
the configuration option tables.

patchset 2 fixes obvious things noted by reviewers; likely
needs more fixing.

patchset 4 adds update feature, fixes pep8

patchset 5 removes debugging print

patchset 6 adds warning to tables regarding their
           automatically generated nature

patchset 7 updates nova flagmappings for H2 and
           adds updated nova tables

patchset 8 adds a section to the README with a worked example
           from a recent use of the script

patchset 10 fixes for cinder
patchset 11 testing, fixing
patchset 12 testing, fixing
patchset 13 adds categories to cinder.flagmappings and generates
            DocBook tables for cinder
patchset 14 adds mappings for neutron and generates DocBook
            tables based on these (an illustrative mapping entry is
            sketched below)
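
For reference, a flagmappings file pairs each configuration option with the category under which its table row is grouped; the exact syntax is the tool's own, so the entries below are only a sketch, with option names and categories taken from the cinder tables in this patch:

    api_paste_config api
    auth_strategy auth
    backup_driver backups
    scheduler_driver scheduler
    volume_driver storage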

patchset 15 adds list of bugs this patch will fix

patchset 16 fixes a whitespace error in the automatically generated tables :(

fixes bug 1192225
Partial-Bug 1207550
Partial-Bug 1207549
fixes bug 1206827
Partial-Bug 1206336
fixes bug 1204484
Partial-Bug 1204205
fixes bug 1202260
fixes bug 1201710
fixes bug 1200794
Partial-Bug 1200740
Partial-Bug 1200418
Partial-Bug 1200047
Partial-Bug 1199209
Partial-Bug 1197657
fixes bug 1197653
fixes bug 1197295
Partial-Bug 1197088
Partial-Bug 1195900
fixes bug 1195560
fixes bug 1195433
Partial-Bug 1192752
fixes bug 1187278
Partial-Bug 1106428
Change-Id: Icd3f4496850c375c9359a10eddb25ab5c722595e
bp:autogenerate-config-tables
Tom Fifield 2013-07-05 11:07:55 +10:00
parent d9668ef862
commit eae9f5b9b5
81 changed files with 4419 additions and 52 deletions

@@ -0,0 +1,92 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for api</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>api_paste_config=api-paste.ini</td>
<td>(StrOpt) File name for the paste.deploy config for cinder-api</td>
</tr>
<tr>
<td>api_rate_limit=True</td>
<td>(BoolOpt) whether to rate limit the api</td>
</tr>
<tr>
<td>backdoor_port=None</td>
<td>(IntOpt) port for eventlet backdoor to listen</td>
</tr>
<tr>
<td>enable_v1_api=True</td>
<td>(BoolOpt) Deploy v1 of the Cinder API.</td>
</tr>
<tr>
<td>enable_v2_api=True</td>
<td>(BoolOpt) Deploy v2 of the Cinder API.</td>
</tr>
<tr>
<td>osapi_max_limit=1000</td>
<td>(IntOpt) the maximum number of items returned in a single response from a collection resource</td>
</tr>
<tr>
<td>osapi_max_request_body_size=114688</td>
<td>(IntOpt) Max size for body of a request</td>
</tr>
<tr>
<td>osapi_volume_base_URL=None</td>
<td>(StrOpt) Base URL that will be presented to users in links to the OpenStack Volume API</td>
</tr>
<tr>
<td>osapi_volume_ext_list=</td>
<td>(ListOpt) Specify list of extensions to load when using osapi_volume_extension option with cinder.api.contrib.select_extensions</td>
</tr>
<tr>
<td>osapi_volume_extension=['cinder.api.contrib.standard_extensions']</td>
<td>(MultiStrOpt) osapi volume extension to load</td>
</tr>
<tr>
<td>transfer_api_class=cinder.transfer.api.API</td>
<td>(StrOpt) The full class name of the volume transfer API class</td>
</tr>
<tr>
<td>volume_api_class=cinder.volume.api.API</td>
<td>(StrOpt) The full class name of the volume API class to use</td>
</tr>
<tr>
<td>xenapi_connection_password=None</td>
<td>(StrOpt) Password for XenAPI connection</td>
</tr>
<tr>
<td>xenapi_connection_url=None</td>
<td>(StrOpt) URL for XenAPI connection</td>
</tr>
<tr>
<td>xenapi_connection_username=root</td>
<td>(StrOpt) Username for XenAPI connection</td>
</tr>
<tr>
<td>xenapi_nfs_server=None</td>
<td>(StrOpt) NFS server to be used by XenAPINFSDriver</td>
</tr>
<tr>
<td>xenapi_nfs_serverpath=None</td>
<td>(StrOpt) Path of exported NFS, used by XenAPINFSDriver</td>
</tr>
<tr>
<td>xenapi_sr_base_path=/var/run/sr-mount</td>
<td>(StrOpt) Base path to the storage repository</td>
</tr>
</tbody>
</table>
</para>
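
For orientation, the options in this table are ordinary cinder.conf settings; an illustrative excerpt using the defaults listed above ([DEFAULT] section placement assumed):

    [DEFAULT]
    api_paste_config = api-paste.ini
    enable_v1_api = True
    enable_v2_api = True
    osapi_max_limit = 1000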

@@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for auth</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>auth_strategy=noauth</td>
<td>(StrOpt) The strategy to use for auth. Supports noauth, keystone, and deprecated.</td>
</tr>
</tbody>
</table>
</para>
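
Since the description above names the supported strategies, switching a deployment from noauth to keystone is a one-line cinder.conf change (illustrative only):

    [DEFAULT]
    auth_strategy = keystone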

@@ -0,0 +1,28 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for backup</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>snapshot_name_template=snapshot-%s</td>
<td>(StrOpt) Template string to be used to generate snapshot names</td>
</tr>
<tr>
<td>snapshot_same_host=True</td>
<td>(BoolOpt) Create volume from snapshot at the host where snapshot resides</td>
</tr>
</tbody>
</table>
</para>

@@ -0,0 +1,64 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for backups</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>backup_api_class=cinder.backup.api.API</td>
<td>(StrOpt) The full class name of the volume backup API class</td>
</tr>
<tr>
<td>backup_ceph_chunk_size=134217728</td>
<td>(IntOpt) the chunk size in bytes that a backup will be broken into before transfer to backup store</td>
</tr>
<tr>
<td>backup_ceph_conf=/etc/ceph/ceph.conf</td>
<td>(StrOpt) Ceph config file to use.</td>
</tr>
<tr>
<td>backup_ceph_pool=backups</td>
<td>(StrOpt) the Ceph pool to backup to</td>
</tr>
<tr>
<td>backup_ceph_stripe_count=0</td>
<td>(IntOpt) RBD stripe count to use when creating a backup image</td>
</tr>
<tr>
<td>backup_ceph_stripe_unit=0</td>
<td>(IntOpt) RBD stripe unit to use when creating a backup image</td>
</tr>
<tr>
<td>backup_ceph_user=cinder</td>
<td>(StrOpt) the Ceph user to connect with</td>
</tr>
<tr>
<td>backup_driver=cinder.backup.drivers.swift</td>
<td>(StrOpt) Driver to use for backups.</td>
</tr>
<tr>
<td>backup_manager=cinder.backup.manager.BackupManager</td>
<td>(StrOpt) full class name for the Manager for volume backup</td>
</tr>
<tr>
<td>backup_name_template=backup-%s</td>
<td>(StrOpt) Template string to be used to generate backup names</td>
</tr>
<tr>
<td>backup_topic=cinder-backup</td>
<td>(StrOpt) the topic volume backup nodes listen on</td>
</tr>
</tbody>
</table>
</para>
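
As an illustrative combination of these options, a Ceph-backed backup configuration in cinder.conf might look like the following; the ceph driver path is assumed here by analogy with the swift default shown above:

    [DEFAULT]
    backup_driver = cinder.backup.drivers.ceph
    backup_ceph_conf = /etc/ceph/ceph.conf
    backup_ceph_user = cinder
    backup_ceph_pool = backups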

@@ -0,0 +1,296 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for common</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>debug=False</td>
<td>(BoolOpt) Print debugging output (set logging level to DEBUG instead of default WARNING level).</td>
</tr>
<tr>
<td>default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN</td>
<td>(ListOpt) list of logger=LEVEL pairs</td>
</tr>
<tr>
<td>default_notification_level=INFO</td>
<td>(StrOpt) Default notification level for outgoing notifications</td>
</tr>
<tr>
<td>default_publisher_id=$host</td>
<td>(StrOpt) Default publisher_id for outgoing notifications</td>
</tr>
<tr>
<td>default_volume_type=None</td>
<td>(StrOpt) default volume type to use</td>
</tr>
<tr>
<td>disable_process_locking=False</td>
<td>(BoolOpt) Whether to disable inter-process locks</td>
</tr>
<tr>
<td>enable_new_services=True</td>
<td>(BoolOpt) Services to be added to the available pool on create</td>
</tr>
<tr>
<td>fatal_deprecations=False</td>
<td>(BoolOpt) make deprecations fatal</td>
</tr>
<tr>
<td>fatal_exception_format_errors=False</td>
<td>(BoolOpt) make exception message format errors fatal</td>
</tr>
<tr>
<td>host=127.0.0.1</td>
<td>(StrOpt) Host to locate redis</td>
</tr>
<tr>
<td>host=autodoc</td>
<td>(StrOpt) Name of this node. This can be an opaque identifier. It is not necessarily a hostname, FQDN, or IP address.</td>
</tr>
<tr>
<td>host=127.0.0.1</td>
<td>(StrOpt) Host to locate redis</td>
</tr>
<tr>
<td>host=autodoc</td>
<td>(StrOpt) Name of this node. This can be an opaque identifier. It is not necessarily a hostname, FQDN, or IP address.</td>
</tr>
<tr>
<td>idle_timeout=3600</td>
<td>(IntOpt) timeout before idle sql connections are reaped</td>
</tr>
<tr>
<td>iet_conf=/etc/iet/ietd.conf</td>
<td>(StrOpt) IET configuration file</td>
</tr>
<tr>
<td>lio_initiator_iqns=</td>
<td>(StrOpt) Comma-separated list of initiator IQNs allowed to connect to the iSCSI target. (From Nova compute nodes.)</td>
</tr>
<tr>
<td>lock_path=None</td>
<td>(StrOpt) Directory to use for lock files. Default to a temp directory</td>
</tr>
<tr>
<td>log_config=None</td>
<td>(StrOpt) If this option is specified, the logging configuration file specified is used and overrides any other logging options specified. Please see the Python logging module documentation for details on logging configuration files.</td>
</tr>
<tr>
<td>log_date_format=%Y-%m-%d %H:%M:%S</td>
<td>(StrOpt) Format string for %%(asctime)s in log records. Default: %(default)s</td>
</tr>
<tr>
<td>log_dir=None</td>
<td>(StrOpt) (Optional) The base directory used for relative --log-file paths</td>
</tr>
<tr>
<td>log_file=None</td>
<td>(StrOpt) (Optional) Name of log file to output to. If no default is set, logging will go to stdout.</td>
</tr>
<tr>
<td>log_format=None</td>
<td>(StrOpt) A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. This option is deprecated. Please use logging_context_format_string and logging_default_format_string instead.</td>
</tr>
<tr>
<td>logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s</td>
<td>(StrOpt) format string to use for log messages with context</td>
</tr>
<tr>
<td>logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d</td>
<td>(StrOpt) data to append to log format when level is DEBUG</td>
</tr>
<tr>
<td>logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s</td>
<td>(StrOpt) format string to use for log messages without context</td>
</tr>
<tr>
<td>logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s</td>
<td>(StrOpt) prefix each line of exception output with this format</td>
</tr>
<tr>
<td>monkey_patch=False</td>
<td>(BoolOpt) Whether to log monkey patching</td>
</tr>
<tr>
<td>monkey_patch_modules=</td>
<td>(ListOpt) List of modules/decorators to monkey patch</td>
</tr>
<tr>
<td>my_ip=198.61.167.113</td>
<td>(StrOpt) ip address of this host</td>
</tr>
<tr>
<td>no_snapshot_gb_quota=False</td>
<td>(BoolOpt) Whether snapshots count against GigaByte quota</td>
</tr>
<tr>
<td>num_iscsi_scan_tries=3</td>
<td>(IntOpt) number of times to rescan iSCSI target to find volume</td>
</tr>
<tr>
<td>num_shell_tries=3</td>
<td>(IntOpt) number of times to attempt to run flakey shell commands</td>
</tr>
<tr>
<td>password=None</td>
<td>(StrOpt) Password for Redis server. (optional)</td>
</tr>
<tr>
<td>policy_default_rule=default</td>
<td>(StrOpt) Rule checked when requested rule is not found</td>
</tr>
<tr>
<td>policy_file=policy.json</td>
<td>(StrOpt) JSON file representing policy</td>
</tr>
<tr>
<td>pool_size=None</td>
<td>(StrOpt) Size of thin provisioning pool (None uses entire cinder VG)</td>
</tr>
<tr>
<td>port=6379</td>
<td>(IntOpt) Use this port to connect to redis host.</td>
</tr>
<tr>
<td>pybasedir=/home/stacker/repos/cinder</td>
<td>(StrOpt) Directory where the cinder python module is installed</td>
</tr>
<tr>
<td>quota_driver=cinder.quota.DbQuotaDriver</td>
<td>(StrOpt) default driver to use for quota checks</td>
</tr>
<tr>
<td>quota_gigabytes=1000</td>
<td>(IntOpt) number of volume gigabytes (snapshots are also included) allowed per project</td>
</tr>
<tr>
<td>quota_snapshots=10</td>
<td>(IntOpt) number of volume snapshots allowed per project</td>
</tr>
<tr>
<td>quota_volumes=10</td>
<td>(IntOpt) number of volumes allowed per project</td>
</tr>
<tr>
<td>reservation_expire=86400</td>
<td>(IntOpt) number of seconds until a reservation expires</td>
</tr>
<tr>
<td>reserved_percentage=0</td>
<td>(IntOpt) The percentage of backend capacity is reserved</td>
</tr>
<tr>
<td>retry_interval=10</td>
<td>(IntOpt) interval between retries of opening a sql connection</td>
</tr>
<tr>
<td>root_helper=sudo</td>
<td>(StrOpt) Deprecated: command to use for running commands as root</td>
</tr>
<tr>
<td>rootwrap_config=None</td>
<td>(StrOpt) Path to the rootwrap configuration file to use for running commands as root</td>
</tr>
<tr>
<td>run_external_periodic_tasks=True</td>
<td>(BoolOpt) Some periodic tasks can be run in a separate process. Should we run them here?</td>
</tr>
<tr>
<td>service_down_time=60</td>
<td>(IntOpt) maximum time since last check-in for up service</td>
</tr>
<tr>
<td>sqlite_db=cinder.sqlite</td>
<td>(StrOpt) the filename to use with sqlite</td>
</tr>
<tr>
<td>sqlite_synchronous=True</td>
<td>(BoolOpt) If true, use synchronous mode for sqlite</td>
</tr>
<tr>
<td>ssh_conn_timeout=30</td>
<td>(IntOpt) SSH connection timeout in seconds</td>
</tr>
<tr>
<td>ssh_max_pool_conn=5</td>
<td>(IntOpt) Maximum ssh connections in the pool</td>
</tr>
<tr>
<td>ssh_min_pool_conn=1</td>
<td>(IntOpt) Minimum ssh connections in the pool</td>
</tr>
<tr>
<td>ssl_ca_file=None</td>
<td>(StrOpt) CA certificate file to use to verify connecting clients</td>
</tr>
<tr>
<td>ssl_cert_file=None</td>
<td>(StrOpt) Certificate file to use when starting the server securely</td>
</tr>
<tr>
<td>ssl_key_file=None</td>
<td>(StrOpt) Private key file to use when starting the server securely</td>
</tr>
<tr>
<td>state_path=$pybasedir</td>
<td>(StrOpt) Top-level directory for maintaining cinder's state</td>
</tr>
<tr>
<td>storage_availability_zone=nova</td>
<td>(StrOpt) availability zone of this node</td>
</tr>
<tr>
<td>syslog_log_facility=LOG_USER</td>
<td>(StrOpt) syslog facility to receive log lines</td>
</tr>
<tr>
<td>tcp_keepidle=600</td>
<td>(IntOpt) Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not supported on OS X.</td>
</tr>
<tr>
<td>topics=notifications</td>
<td>(ListOpt) AMQP topic(s) used for openstack notifications</td>
</tr>
<tr>
<td>until_refresh=0</td>
<td>(IntOpt) count of reservations until usage is refreshed</td>
</tr>
<tr>
<td>use_default_quota_class=True</td>
<td>(BoolOpt) whether to use default quota class for default quota</td>
</tr>
<tr>
<td>use_forwarded_for=False</td>
<td>(BoolOpt) Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy.</td>
</tr>
<tr>
<td>use_stderr=True</td>
<td>(BoolOpt) Log output to standard error</td>
</tr>
<tr>
<td>use_syslog=False</td>
<td>(BoolOpt) Use syslog for logging.</td>
</tr>
<tr>
<td>use_tpool=False</td>
<td>(BoolOpt) Enable the experimental use of thread pooling for all DB API calls</td>
</tr>
<tr>
<td>verbose=False</td>
<td>(BoolOpt) Print more verbose output (set logging level to INFO instead of default WARNING level).</td>
</tr>
</tbody>
</table>
</para>
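
As a small worked example drawn from this group, a cinder.conf excerpt adjusting the logging and state options might look like this (the log_dir and state_path values are invented for illustration, not defaults):

    [DEFAULT]
    debug = False
    verbose = True
    use_syslog = False
    log_dir = /var/log/cinder
    state_path = /var/lib/cinder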

@@ -0,0 +1,36 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for connection</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>connection=sqlite:///$state_path/$sqlite_db</td>
<td>(StrOpt) The SQLAlchemy connection string used to connect to the database</td>
</tr>
<tr>
<td>connection_debug=0</td>
<td>(IntOpt) Verbosity of SQL debugging information. 0=None, 100=Everything</td>
</tr>
<tr>
<td>connection_trace=False</td>
<td>(BoolOpt) Add python stack traces to SQL as comment strings</td>
</tr>
<tr>
<td>connection_type=None</td>
<td>(StrOpt) Virtualization api connection type : libvirt, xenapi, or fake</td>
</tr>
</tbody>
</table>
</para>
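
The connection option takes a standard SQLAlchemy URL; the default above points at SQLite, while a production deployment would typically use something like the line below (credentials, hostname, and the option's section placement are placeholders and assumptions):

    connection = mysql://cinder:CINDER_DBPASS@controller/cinder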

@@ -0,0 +1,28 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for database</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>db_backend=sqlalchemy</td>
<td>(StrOpt) The backend to use for db</td>
</tr>
<tr>
<td>db_driver=cinder.db</td>
<td>(StrOpt) driver to use for database access</td>
</tr>
</tbody>
</table>
</para>

@@ -0,0 +1,84 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for images</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>glance_api_insecure=False</td>
<td>(BoolOpt) Allow to perform insecure SSL (https) requests to glance</td>
</tr>
<tr>
<td>glance_api_servers=$glance_host:$glance_port</td>
<td>(ListOpt) A list of the glance api servers available to cinder ([hostname|ip]:port)</td>
</tr>
<tr>
<td>glance_api_ssl_compression=False</td>
<td>(BoolOpt) Whether to attempt to negotiate SSL layer compression when using SSL (https) requests. Set to False to disable SSL layer compression. In some cases disabling this may improve data throughput, eg when high network bandwidth is available and you are using already compressed image formats such as qcow2 .</td>
</tr>
<tr>
<td>glance_api_version=1</td>
<td>(IntOpt) Version of the glance api to use</td>
</tr>
<tr>
<td>glance_host=$my_ip</td>
<td>(StrOpt) default glance hostname or ip</td>
</tr>
<tr>
<td>glance_num_retries=0</td>
<td>(IntOpt) Number of retries when downloading an image from glance</td>
</tr>
<tr>
<td>glance_port=9292</td>
<td>(IntOpt) default glance port</td>
</tr>
<tr>
<td>gpfs_images_dir=None</td>
<td>(StrOpt) Path to GPFS Glance repository as mounted on Nova nodes</td>
</tr>
<tr>
<td>gpfs_images_share_mode=None</td>
<td>(StrOpt) Set this if Glance image repo is on GPFS as well so that the image bits can be transferred efficiently between Glance and Cinder. Valid values are copy or copy_on_write. copy performs a full copy of the image, copy_on_write efficiently shares unmodified blocks of the image.</td>
</tr>
<tr>
<td>gpfs_max_clone_depth=0</td>
<td>(IntOpt) A lengthy chain of copy-on-write snapshots or clones could have impact on performance. This option limits the number of indirections required to reach a specific block. 0 indicates unlimited.</td>
</tr>
<tr>
<td>gpfs_mount_point_base=None</td>
<td>(StrOpt) Path to the directory on GPFS mount point where volumes are stored</td>
</tr>
<tr>
<td>gpfs_sparse_volumes=True</td>
<td>(BoolOpt) Create volumes as sparse files which take no space. If set to False volume is created as regular file. In this case volume creation may take a significantly longer time.</td>
</tr>
<tr>
<td>image_conversion_dir=/tmp</td>
<td>(StrOpt) parent dir for tempdir used for image conversion</td>
</tr>
<tr>
<td>instance_format=[instance: %(uuid)s]</td>
<td>(StrOpt) If an instance is passed with the log message, format it like this</td>
</tr>
<tr>
<td>instance_uuid_format=[instance: %(uuid)s]</td>
<td>(StrOpt) If an instance UUID is passed with the log message, format it like this</td>
</tr>
<tr>
<td>use_multipath_for_image_xfer=False</td>
<td>(BoolOpt) Do we attach/detach volumes in cinder using multipath for volume to image and image to volume transfers?</td>
</tr>
</tbody>
</table>
</para>
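
These options control how cinder reaches glance; for example, pointing at an explicit glance endpoint rather than $my_ip (the IP below is a documentation address, purely illustrative):

    [DEFAULT]
    glance_host = 192.0.2.10
    glance_port = 9292
    glance_api_version = 1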

@@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for log</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>backlog=4096</td>
<td>(IntOpt) Number of backlog requests to configure the socket with</td>
</tr>
</tbody>
</table>
</para>

@@ -0,0 +1,208 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for rpc</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>allowed_rpc_exception_modules=cinder.openstack.common.exception,nova.exception,cinder.exception,exceptions</td>
<td>(ListOpt) Modules of exceptions that are permitted to be recreated upon receiving exception data from an rpc call.</td>
</tr>
<tr>
<td>amqp_rpc_single_reply_queue=False</td>
<td>(BoolOpt) Enable a fast single reply queue if using AMQP based RPC like RabbitMQ or Qpid.</td>
</tr>
<tr>
<td>control_exchange=openstack</td>
<td>(StrOpt) AMQP exchange to connect to if using RabbitMQ or Qpid</td>
</tr>
<tr>
<td>fake_rabbit=False</td>
<td>(BoolOpt) If passed, use a fake RabbitMQ provider</td>
</tr>
<tr>
<td>kombu_ssl_ca_certs=</td>
<td>(StrOpt) SSL certification authority file (valid only if SSL enabled)</td>
</tr>
<tr>
<td>kombu_ssl_certfile=</td>
<td>(StrOpt) SSL cert file (valid only if SSL enabled)</td>
</tr>
<tr>
<td>kombu_ssl_keyfile=</td>
<td>(StrOpt) SSL key file (valid only if SSL enabled)</td>
</tr>
<tr>
<td>kombu_ssl_version=</td>
<td>(StrOpt) SSL version to use (valid only if SSL enabled)</td>
</tr>
<tr>
<td>matchmaker_heartbeat_freq=300</td>
<td>(IntOpt) Heartbeat frequency</td>
</tr>
<tr>
<td>matchmaker_heartbeat_ttl=600</td>
<td>(IntOpt) Heartbeat time-to-live.</td>
</tr>
<tr>
<td>matchmaker_ringfile=/etc/nova/matchmaker_ring.json</td>
<td>(StrOpt) Matchmaker ring file (JSON)</td>
</tr>
<tr>
<td>notification_driver=[]</td>
<td>(MultiStrOpt) Driver or drivers to handle sending notifications</td>
</tr>
<tr>
<td>notification_topics=notifications</td>
<td>(ListOpt) AMQP topic used for openstack notifications</td>
</tr>
<tr>
<td>publish_errors=False</td>
<td>(BoolOpt) publish error events</td>
</tr>
<tr>
<td>qpid_heartbeat=60</td>
<td>(IntOpt) Seconds between connection keepalive heartbeats</td>
</tr>
<tr>
<td>qpid_hostname=localhost</td>
<td>(StrOpt) Qpid broker hostname</td>
</tr>
<tr>
<td>qpid_hosts=$qpid_hostname:$qpid_port</td>
<td>(ListOpt) Qpid HA cluster host:port pairs</td>
</tr>
<tr>
<td>qpid_password=</td>
<td>(StrOpt) Password for qpid connection</td>
</tr>
<tr>
<td>qpid_port=5672</td>
<td>(IntOpt) Qpid broker port</td>
</tr>
<tr>
<td>qpid_protocol=tcp</td>
<td>(StrOpt) Transport to use, either 'tcp' or 'ssl'</td>
</tr>
<tr>
<td>qpid_sasl_mechanisms=</td>
<td>(StrOpt) Space separated list of SASL mechanisms to use for auth</td>
</tr>
<tr>
<td>qpid_tcp_nodelay=True</td>
<td>(BoolOpt) Disable Nagle algorithm</td>
</tr>
<tr>
<td>qpid_username=</td>
<td>(StrOpt) Username for qpid connection</td>
</tr>
<tr>
<td>rabbit_durable_queues=False</td>
<td>(BoolOpt) use durable queues in RabbitMQ</td>
</tr>
<tr>
<td>rabbit_ha_queues=False</td>
<td>(BoolOpt) use H/A queues in RabbitMQ (x-ha-policy: all). You need to wipe the RabbitMQ database when changing this option.</td>
</tr>
<tr>
<td>rabbit_host=localhost</td>
<td>(StrOpt) The RabbitMQ broker address where a single node is used</td>
</tr>
<tr>
<td>rabbit_hosts=$rabbit_host:$rabbit_port</td>
<td>(ListOpt) RabbitMQ HA cluster host:port pairs</td>
</tr>
<tr>
<td>rabbit_max_retries=0</td>
<td>(IntOpt) maximum retries with trying to connect to RabbitMQ (the default of 0 implies an infinite retry count)</td>
</tr>
<tr>
<td>rabbit_password=guest</td>
<td>(StrOpt) the RabbitMQ password</td>
</tr>
<tr>
<td>rabbit_port=5672</td>
<td>(IntOpt) The RabbitMQ broker port where a single node is used</td>
</tr>
<tr>
<td>rabbit_retry_backoff=2</td>
<td>(IntOpt) how long to backoff for between retries when connecting to RabbitMQ</td>
</tr>
<tr>
<td>rabbit_retry_interval=1</td>
<td>(IntOpt) how frequently to retry connecting with RabbitMQ</td>
</tr>
<tr>
<td>rabbit_use_ssl=False</td>
<td>(BoolOpt) connect over SSL for RabbitMQ</td>
</tr>
<tr>
<td>rabbit_userid=guest</td>
<td>(StrOpt) the RabbitMQ userid</td>
</tr>
<tr>
<td>rabbit_virtual_host=/</td>
<td>(StrOpt) the RabbitMQ virtual host</td>
</tr>
<tr>
<td>rpc_backend=cinder.openstack.common.rpc.impl_kombu</td>
<td>(StrOpt) The messaging module to use, defaults to kombu.</td>
</tr>
<tr>
<td>rpc_cast_timeout=30</td>
<td>(IntOpt) Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.</td>
</tr>
<tr>
<td>rpc_conn_pool_size=30</td>
<td>(IntOpt) Size of RPC connection pool</td>
</tr>
<tr>
<td>rpc_response_timeout=60</td>
<td>(IntOpt) Seconds to wait for a response from call or multicall</td>
</tr>
<tr>
<td>rpc_thread_pool_size=64</td>
<td>(IntOpt) Size of RPC thread pool</td>
</tr>
<tr>
<td>rpc_zmq_bind_address=*</td>
<td>(StrOpt) ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. The "host" option should point or resolve to this address.</td>
</tr>
<tr>
<td>rpc_zmq_contexts=1</td>
<td>(IntOpt) Number of ZeroMQ contexts, defaults to 1</td>
</tr>
<tr>
<td>rpc_zmq_host=autodoc</td>
<td>(StrOpt) Name of this node. Must be a valid hostname, FQDN, or IP address. Must match "host" option, if running Nova.</td>
</tr>
<tr>
<td>rpc_zmq_ipc_dir=/var/run/openstack</td>
<td>(StrOpt) Directory for holding IPC sockets</td>
</tr>
<tr>
<td>rpc_zmq_matchmaker=cinder.openstack.common.rpc.matchmaker.MatchMakerLocalhost</td>
<td>(StrOpt) MatchMaker driver</td>
</tr>
<tr>
<td>rpc_zmq_port=9501</td>
<td>(IntOpt) ZeroMQ receiver listening port</td>
</tr>
<tr>
<td>rpc_zmq_topic_backlog=None</td>
<td>(IntOpt) Maximum number of ingress messages to locally buffer per topic. Default is unlimited.</td>
</tr>
</tbody>
</table>
</para>
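
A common use of this group is pointing cinder at a RabbitMQ broker; an illustrative cinder.conf excerpt (hostname and password are placeholders):

    [DEFAULT]
    rpc_backend = cinder.openstack.common.rpc.impl_kombu
    rabbit_host = controller
    rabbit_userid = guest
    rabbit_password = RABBIT_PASS
    rabbit_virtual_host = /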

@@ -0,0 +1,52 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for scheduler</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>scheduler_default_filters=AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter</td>
<td>(ListOpt) Which filter class names to use for filtering hosts when not specified in the request.</td>
</tr>
<tr>
<td>scheduler_default_weighers=CapacityWeigher</td>
<td>(ListOpt) Which weigher class names to use for weighing hosts.</td>
</tr>
<tr>
<td>scheduler_driver=cinder.scheduler.filter_scheduler.FilterScheduler</td>
<td>(StrOpt) Default scheduler driver to use</td>
</tr>
<tr>
<td>scheduler_host_manager=cinder.scheduler.host_manager.HostManager</td>
<td>(StrOpt) The scheduler host manager class to use</td>
</tr>
<tr>
<td>scheduler_json_config_location=</td>
<td>(StrOpt) Absolute path to scheduler configuration JSON file.</td>
</tr>
<tr>
<td>scheduler_manager=cinder.scheduler.manager.SchedulerManager</td>
<td>(StrOpt) full class name for the Manager for scheduler</td>
</tr>
<tr>
<td>scheduler_max_attempts=3</td>
<td>(IntOpt) Maximum number of attempts to schedule a volume</td>
</tr>
<tr>
<td>scheduler_topic=cinder-scheduler</td>
<td>(StrOpt) the topic scheduler nodes listen on</td>
</tr>
</tbody>
</table>
</para>
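
For example, trimming the filter chain is just a matter of resetting scheduler_default_filters in cinder.conf (illustrative, using filter names from the default above):

    [DEFAULT]
    scheduler_default_filters = AvailabilityZoneFilter,CapacityFilter
    scheduler_default_weighers = CapacityWeigher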

@@ -0,0 +1,496 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for storage</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>available_devices=</td>
<td>(ListOpt) List of all available devices</td>
</tr>
<tr>
<td>backend=sqlalchemy</td>
<td>(StrOpt) The backend to use for db</td>
</tr>
<tr>
<td>bindir=$pybasedir/bin</td>
<td>(StrOpt) Directory where cinder binaries are installed</td>
</tr>
<tr>
<td>capacity_weight_multiplier=1.0</td>
<td>(FloatOpt) Multiplier used for weighing volume capacity. Negative numbers mean to stack vs spread.</td>
</tr>
<tr>
<td>cinder_huawei_conf_file=/etc/cinder/cinder_huawei_conf.xml</td>
<td>(StrOpt) config data for cinder huawei plugin</td>
</tr>
<tr>
<td>coraid_esm_address=</td>
<td>(StrOpt) IP address of Coraid ESM</td>
</tr>
<tr>
<td>coraid_group=admin</td>
<td>(StrOpt) Name of group on Coraid ESM to which coraid_user belongs (must have admin privilege)</td>
</tr>
<tr>
<td>coraid_password=password</td>
<td>(StrOpt) Password to connect to Coraid ESM</td>
</tr>
<tr>
<td>coraid_repository_key=coraid_repository</td>
<td>(StrOpt) Volume Type key name to store ESM Repository Name</td>
</tr>
<tr>
<td>coraid_user=admin</td>
<td>(StrOpt) User name to connect to Coraid ESM</td>
</tr>
<tr>
<td>enabled_backends=None</td>
<td>(ListOpt) A list of backend names to use. These backend names should be backed by a unique [CONFIG] group with its options</td>
</tr>
<tr>
<td>glusterfs_disk_util=df</td>
<td>(StrOpt) Use du or df for free space calculation</td>
</tr>
<tr>
<td>glusterfs_mount_point_base=$state_path/mnt</td>
<td>(StrOpt) Base dir containing mount points for gluster shares</td>
</tr>
<tr>
<td>glusterfs_shares_config=/etc/cinder/glusterfs_shares</td>
<td>(StrOpt) File with the list of available gluster shares</td>
</tr>
<tr>
<td>glusterfs_sparsed_volumes=True</td>
<td>(BoolOpt) Create volumes as sparse files which take no space. If set to False, the volume is created as a regular file. In such a case volume creation takes a lot of time.</td>
</tr>
<tr>
<td>hds_cinder_config_file=/opt/hds/hus/cinder_hus_conf.xml</td>
<td>(StrOpt) configuration file for HDS cinder plugin for HUS</td>
</tr>
<tr>
<td>iscsi_helper=tgtadm</td>
<td>(StrOpt) iscsi target user-land tool to use</td>
</tr>
<tr>
<td>iscsi_iotype=fileio</td>
<td>(StrOpt) Sets the behavior of the iSCSI target to either perform blockio or fileio; optionally, auto can be set and Cinder will autodetect the type of backing device</td>
</tr>
<tr>
<td>iscsi_ip_address=$my_ip</td>
<td>(StrOpt) The IP address that the iSCSI daemon is listening on</td>
</tr>
<tr>
<td>iscsi_num_targets=100</td>
<td>(IntOpt) Number of iscsi target ids per host</td>
</tr>
<tr>
<td>iscsi_port=3260</td>
<td>(IntOpt) The port that the iSCSI daemon is listening on</td>
</tr>
<tr>
<td>iscsi_target_prefix=iqn.2010-10.org.openstack:</td>
<td>(StrOpt) prefix for iscsi volumes</td>
</tr>
<tr>
<td>lvm_mirrors=0</td>
<td>(IntOpt) If set, create lvms with multiple mirrors. Note that this requires lvm_mirrors + 2 pvs with available space</td>
</tr>
<tr>
<td>max_age=0</td>
<td>(IntOpt) number of seconds between subsequent usage refreshes</td>
</tr>
<tr>
<td>max_gigabytes=10000</td>
<td>(IntOpt) maximum number of volume gigabytes to allow per host</td>
</tr>
<tr>
<td>max_overflow=None</td>
<td>(IntOpt) If set, use this value for max_overflow with sqlalchemy</td>
</tr>
<tr>
<td>max_pool_size=5</td>
<td>(IntOpt) Maximum number of SQL connections to keep open in a pool</td>
</tr>
<tr>
<td>max_retries=10</td>
<td>(IntOpt) maximum db connection retries during startup. (setting -1 implies an infinite retry count)</td>
</tr>
<tr>
<td>memcached_servers=None</td>
<td>(ListOpt) Memcached servers or None for in process cache.</td>
</tr>
<tr>
<td>migration_create_volume_timeout_secs=300</td>
<td>(IntOpt) Timeout for creating the volume to migrate to when performing volume migration (seconds)</td>
</tr>
<tr>
<td>min_pool_size=1</td>
<td>(IntOpt) Minimum number of SQL connections to keep open in a pool</td>
</tr>
<tr>
<td>netapp_login=None</td>
<td>(StrOpt) User name for the storage controller</td>
</tr>
<tr>
<td>netapp_password=None</td>
<td>(StrOpt) Password for the storage controller</td>
</tr>
<tr>
<td>netapp_server_hostname=None</td>
<td>(StrOpt) Host name for the storage controller</td>
</tr>
<tr>
<td>netapp_server_port=80</td>
<td>(IntOpt) Port number for the storage controller</td>
</tr>
<tr>
<td>netapp_size_multiplier=1.2</td>
<td>(FloatOpt) Volume size multiplier to ensure while creation</td>
</tr>
<tr>
<td>netapp_storage_family=ontap_cluster</td>
<td>(StrOpt) Storage family type.</td>
</tr>
<tr>
<td>netapp_storage_protocol=None</td>
<td>(StrOpt) Storage protocol type.</td>
</tr>
<tr>
<td>netapp_transport_type=http</td>
<td>(StrOpt) Transport type protocol</td>
</tr>
<tr>
<td>netapp_vfiler=None</td>
<td>(StrOpt) Vfiler to use for provisioning</td>
</tr>
<tr>
<td>netapp_volume_list=None</td>
<td>(StrOpt) Comma separated volumes to be used for provisioning</td>
</tr>
<tr>
<td>netapp_vserver=openstack</td>
<td>(StrOpt) Cluster vserver to use for provisioning</td>
</tr>
<tr>
<td>nexenta_blocksize=</td>
<td>(StrOpt) block size for volumes (blank=default,8KB)</td>
</tr>
<tr>
<td>nexenta_host=</td>
<td>(StrOpt) IP address of Nexenta SA</td>
</tr>
<tr>
<td>nexenta_iscsi_target_portal_port=3260</td>
<td>(IntOpt) Nexenta target portal port</td>
</tr>
<tr>
<td>nexenta_password=nexenta</td>
<td>(StrOpt) Password to connect to Nexenta SA</td>
</tr>
<tr>
<td>nexenta_rest_port=2000</td>
<td>(IntOpt) HTTP port to connect to Nexenta REST API server</td>
</tr>
<tr>
<td>nexenta_rest_protocol=auto</td>
<td>(StrOpt) Use http or https for REST connection (default auto)</td>
</tr>
<tr>
<td>nexenta_sparse=False</td>
<td>(BoolOpt) flag to create sparse volumes</td>
</tr>
<tr>
<td>nexenta_target_group_prefix=cinder/</td>
<td>(StrOpt) prefix for iSCSI target groups on SA</td>
</tr>
<tr>
<td>nexenta_target_prefix=iqn.1986-03.com.sun:02:cinder-</td>
<td>(StrOpt) IQN prefix for iSCSI targets</td>
</tr>
<tr>
<td>nexenta_user=admin</td>
<td>(StrOpt) User name to connect to Nexenta SA</td>
</tr>
<tr>
<td>nexenta_volume=cinder</td>
<td>(StrOpt) pool on SA that will hold all volumes</td>
</tr>
<tr>
<td>nfs_mount_options=None</td>
<td>(StrOpt) Mount options passed to the nfs client. See section of the nfs man page for details</td>
</tr>
<tr>
<td>nfs_mount_point_base=$state_path/mnt</td>
<td>(StrOpt) Base dir containing mount points for nfs shares</td>
</tr>
<tr>
<td>nfs_oversub_ratio=1.0</td>
<td>(FloatOpt) This will compare the allocated to available space on the volume destination. If the ratio exceeds this number, the destination will no longer be valid.</td>
</tr>
<tr>
<td>nfs_shares_config=/etc/cinder/nfs_shares</td>
<td>(StrOpt) File with the list of available nfs shares</td>
</tr>
<tr>
<td>nfs_sparsed_volumes=True</td>
<td>(BoolOpt) Create volumes as sparse files which take no space. If set to False, the volume is created as a regular file. In such a case volume creation takes a lot of time.</td>
</tr>
<tr>
<td>nfs_used_ratio=0.95</td>
<td>(FloatOpt) Percent of ACTUAL usage of the underlying volume before no new volumes can be allocated to the volume destination.</td>
</tr>
<tr>
<td>rbd_ceph_conf=</td>
<td>(StrOpt) path to the ceph configuration file to use</td>
</tr>
<tr>
<td>rbd_flatten_volume_from_snapshot=False</td>
<td>(BoolOpt) flatten volumes created from snapshots to remove dependency</td>
</tr>
<tr>
<td>rbd_pool=rbd</td>
<td>(StrOpt) the RADOS pool in which rbd volumes are stored</td>
</tr>
<tr>
<td>rbd_secret_uuid=None</td>
<td>(StrOpt) the libvirt uuid of the secret for the rbd_user volumes</td>
</tr>
<tr>
<td>rbd_user=None</td>
<td>(StrOpt) the RADOS client name for accessing rbd volumes - only set when using cephx authentication</td>
</tr>
<tr>
<td>san_clustername=</td>
<td>(StrOpt) Cluster name to use for creating volumes</td>
</tr>
<tr>
<td>san_ip=</td>
<td>(StrOpt) IP address of SAN controller</td>
</tr>
<tr>
<td>san_is_local=False</td>
<td>(BoolOpt) Execute commands locally instead of over SSH; use if the volume service is running on the SAN device</td>
</tr>
<tr>
<td>san_login=admin</td>
<td>(StrOpt) Username for SAN controller</td>
</tr>
<tr>
<td>san_password=</td>
<td>(StrOpt) Password for SAN controller</td>
</tr>
<tr>
<td>san_private_key=</td>
<td>(StrOpt) Filename of private key to use for SSH authentication</td>
</tr>
<tr>
<td>san_ssh_port=22</td>
<td>(IntOpt) SSH port to use with SAN</td>
</tr>
<tr>
<td>san_thin_provision=True</td>
<td>(BoolOpt) Use thin provisioning for SAN volumes?</td>
</tr>
<tr>
<td>san_zfs_volume_base=rpool/</td>
<td>(StrOpt) The ZFS path under which to create zvols for volumes.</td>
</tr>
<tr>
<td>scality_sofs_config=None</td>
<td>(StrOpt) Path or URL to Scality SOFS configuration file</td>
</tr>
<tr>
<td>scality_sofs_mount_point=$state_path/scality</td>
<td>(StrOpt) Base dir where Scality SOFS shall be mounted</td>
</tr>
<tr>
<td>scality_sofs_volume_dir=cinder/volumes</td>
<td>(StrOpt) Path from Scality SOFS root to volume dir</td>
</tr>
<tr>
<td>sf_account_prefix=autodoc</td>
<td>(StrOpt) Create SolidFire accounts with this prefix</td>
</tr>
<tr>
<td>sf_allow_tenant_qos=False</td>
<td>(BoolOpt) Allow tenants to specify QOS on create</td>
</tr>
<tr>
<td>sf_emulate_512=True</td>
<td>(BoolOpt) Set 512 byte emulation on volume creation; </td>
</tr>
<tr>
<td>storwize_svc_connection_protocol=iSCSI</td>
<td>(StrOpt) Connection protocol (iSCSI/FC)</td>
</tr>
<tr>
<td>storwize_svc_flashcopy_timeout=120</td>
<td>(IntOpt) Maximum number of seconds to wait for FlashCopy to be prepared. Maximum value is 600 seconds (10 minutes).</td>
</tr>
<tr>
<td>storwize_svc_multihostmap_enabled=True</td>
<td>(BoolOpt) Allows vdisk to multi host mapping</td>
</tr>
<tr>
<td>storwize_svc_multipath_enabled=False</td>
<td>(BoolOpt) Connect with multipath (currently FC-only)</td>
</tr>
<tr>
<td>storwize_svc_vol_autoexpand=True</td>
<td>(BoolOpt) Storage system autoexpand parameter for volumes (True/False)</td>
</tr>
<tr>
<td>storwize_svc_vol_compression=False</td>
<td>(BoolOpt) Storage system compression option for volumes</td>
</tr>
<tr>
<td>storwize_svc_vol_easytier=True</td>
<td>(BoolOpt) Enable Easy Tier for volumes</td>
</tr>
<tr>
<td>storwize_svc_vol_grainsize=256</td>
<td>(IntOpt) Storage system grain size parameter for volumes (32/64/128/256)</td>
</tr>
<tr>
<td>storwize_svc_vol_rsize=2</td>
<td>(IntOpt) Storage system space-efficiency parameter for volumes (percentage)</td>
</tr>
<tr>
<td>storwize_svc_vol_warning=0</td>
<td>(IntOpt) Storage system threshold for volume capacity warnings (percentage)</td>
</tr>
<tr>
<td>storwize_svc_volpool_name=volpool</td>
<td>(StrOpt) Storage system storage pool for volumes</td>
</tr>
<tr>
<td>volume_backend_name=None</td>
<td>(StrOpt) The backend name for a given driver implementation</td>
</tr>
<tr>
<td>volume_clear=zero</td>
<td>(StrOpt) Method used to wipe old volumes (valid options are: none, zero, shred)</td>
</tr>
<tr>
<td>volume_clear_size=0</td>
<td>(IntOpt) Size in MiB to wipe at start of old volumes. 0 =&gt; all</td>
</tr>
<tr>
<td>volume_dd_blocksize=1M</td>
<td>(StrOpt) The default block size used when copying/clearing volumes</td>
</tr>
<tr>
<td>volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver</td>
<td>(StrOpt) Driver to use for volume creation</td>
</tr>
<tr>
<td>volume_group=cinder-volumes</td>
<td>(StrOpt) Name for the VG that will contain exported volumes</td>
</tr>
<tr>
<td>volume_manager=cinder.volume.manager.VolumeManager</td>
<td>(StrOpt) full class name for the Manager for volume</td>
</tr>
<tr>
<td>volume_name_template=volume-%s</td>
<td>(StrOpt) Template string to be used to generate volume names</td>
</tr>
<tr>
<td>volume_tmp_dir=None</td>
<td>(StrOpt) where to store temporary image files if the volume driver does not write them directly to the volume</td>
</tr>
<tr>
<td>volume_topic=cinder-volume</td>
<td>(StrOpt) the topic volume nodes listen on</td>
</tr>
<tr>
<td>volume_transfer_key_length=16</td>
<td>(IntOpt) The number of characters in the autogenerated auth key.</td>
</tr>
<tr>
<td>volume_transfer_salt_length=8</td>
<td>(IntOpt) The number of characters in the salt.</td>
</tr>
<tr>
<td>volume_usage_audit_period=month</td>
<td>(StrOpt) time period to generate volume usages for. Time period must be hour, day, month or year</td>
</tr>
<tr>
<td>volumes_dir=$state_path/volumes</td>
<td>(StrOpt) Volume configuration file storage directory</td>
</tr>
<tr>
<td>windows_iscsi_lun_path=C:\iSCSIVirtualDisks</td>
<td>(StrOpt) Path to store VHD backed volumes</td>
</tr>
<tr>
<td>xiv_proxy=xiv_openstack.nova_proxy.XIVNovaProxy</td>
<td>(StrOpt) Proxy driver</td>
</tr>
<tr>
<td>zadara_default_cache_policy=write-through</td>
<td>(StrOpt) Default cache policy for volumes</td>
</tr>
<tr>
<td>zadara_default_encryption=NO</td>
<td>(StrOpt) Default encryption policy for volumes</td>
</tr>
<tr>
<td>zadara_default_stripesize=64</td>
<td>(StrOpt) Default stripe size for volumes</td>
</tr>
<tr>
<td>zadara_default_striping_mode=simple</td>
<td>(StrOpt) Default striping mode for volumes</td>
</tr>
<tr>
<td>zadara_password=None</td>
<td>(StrOpt) Password for the VPSA</td>
</tr>
<tr>
<td>zadara_user=None</td>
<td>(StrOpt) User name for the VPSA</td>
</tr>
<tr>
<td>zadara_vol_name_template=OS_%s</td>
<td>(StrOpt) Default template for VPSA volume names</td>
</tr>
<tr>
<td>zadara_vpsa_allow_nonexistent_delete=True</td>
<td>(BoolOpt) Don't halt on deletion of non-existing volumes</td>
</tr>
<tr>
<td>zadara_vpsa_auto_detach_on_delete=True</td>
<td>(BoolOpt) Automatically detach from servers on volume delete</td>
</tr>
<tr>
<td>zadara_vpsa_ip=None</td>
<td>(StrOpt) Management IP of Zadara VPSA</td>
</tr>
<tr>
<td>zadara_vpsa_poolname=None</td>
<td>(StrOpt) Name of VPSA storage pool for volumes</td>
</tr>
<tr>
<td>zadara_vpsa_port=None</td>
<td>(StrOpt) Zadara VPSA port number</td>
</tr>
<tr>
<td>zadara_vpsa_use_ssl=False</td>
<td>(BoolOpt) Use SSL connection</td>
</tr>
</tbody>
</table>
</para>
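
To show a few of these options in context, a plain LVM/iSCSI backend in cinder.conf could be configured as below (values are the defaults from the table except the IP, which is a documentation address):

    [DEFAULT]
    volume_driver = cinder.volume.drivers.lvm.LVMISCSIDriver
    volume_group = cinder-volumes
    iscsi_helper = tgtadm
    iscsi_ip_address = 192.0.2.20
    volume_clear = zero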

@@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for zones</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>cloned_volume_same_az=True</td>
<td>(BoolOpt) Ensure that the new volumes are the same AZ as snapshot or source volume</td>
</tr>
</tbody>
</table>
</para>

@@ -0,0 +1,28 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for lbaas</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>driver_fqn=neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver</td>
<td>(StrOpt) LBaaS driver Fully Qualified Name</td>
</tr>
<tr>
<td>loadbalancer_pool_scheduler_driver=neutron.services.loadbalancer.agent_scheduler.ChanceScheduler</td>
<td>(StrOpt) Driver to use for scheduling pool to a default loadbalancer agent</td>
</tr>
</tbody>
</table>
</para>
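
Both options are plugin-side settings; an illustrative excerpt using the defaults above (this patch does not show which neutron configuration file they belong in, so take the placement as an assumption):

    driver_fqn = neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver
    loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler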

@@ -0,0 +1,40 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for ml2</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>flat_networks=</td>
<td>(ListOpt) List of physical_network names with which flat networks can be created. Use * to allow flat networks with arbitrary physical_network names.</td>
</tr>
<tr>
<td>mechanism_drivers=</td>
<td>(ListOpt) An ordered list of networking mechanism driver entrypoints to be loaded from the neutron.ml2.mechanism_drivers namespace.</td>
</tr>
<tr>
<td>req_timeout=30</td>
<td>(IntOpt) Total time limit for a cluster request</td>
</tr>
<tr>
<td>tenant_network_types=local</td>
<td>(ListOpt) Ordered list of network_types to allocate as tenant networks.</td>
</tr>
<tr>
<td>type_drivers=local,flat,vlan,gre,vxlan</td>
<td>(ListOpt) List of network type driver entrypoints to be loaded from the neutron.ml2.type_drivers namespace.</td>
</tr>
</tbody>
</table>
</para>
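
An illustrative ml2 plugin configuration built only from the defaults and descriptions above (the section names follow the usual ml2_conf.ini layout and are an assumption here):

    [ml2]
    type_drivers = local,flat,vlan,gre,vxlan
    tenant_network_types = local

    [ml2_type_flat]
    flat_networks = *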

@@ -0,0 +1,32 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for mlnx</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>daemon_endpoint=tcp://127.0.0.1:5001</td>
<td>(StrOpt) eswitch daemon end point</td>
</tr>
<tr>
<td>request_timeout=3000</td>
<td>(IntOpt) The number of milliseconds the agent will wait for response on request to daemon.</td>
</tr>
<tr>
<td>vnic_type=direct</td>
<td>(StrOpt) type of VM network interface: direct or hostdev</td>
</tr>
</tbody>
</table>
</para>

@@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for api</caption>

@@ -0,0 +1,28 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for apiv3</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>extensions_blacklist=</td>
<td>(ListOpt) A list of v3 API extensions to never load. Specify the extension aliases here.</td>
</tr>
<tr>
<td>extensions_whitelist=</td>
<td>(ListOpt) If the list is not empty then a v3 API extension will only be loaded if it exists in this list. Specify the extension aliases here.</td>
</tr>
</tbody>
</table>
</para>
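
These two lists control which v3 API extensions are loaded; an illustrative nova.conf fragment (the [osapi_v3] group name is assumed, and the extension alias is made up for illustration):

    [osapi_v3]
    extensions_blacklist = os-fake-extension
    extensions_whitelist =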

@@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for authentication</caption>
@@ -13,7 +17,7 @@
<tbody>
<tr>
<td>api_rate_limit=True</td>
<td>(BoolOpt) whether to rate limit the api</td>
<td>(BoolOpt) whether to use per-user rate limiting for the api.</td>
</tr>
<tr>
<td>auth_strategy=noauth</td>

@@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for availabilityzones</caption>

@@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for baremetal</caption>
@@ -64,7 +68,11 @@
<td>(IntOpt) Timeout for PXE deployments. Default: 0 (unlimited)</td>
</tr>
<tr>
<td>sql_connection=sqlite:////home/fifieldt/temp/nova/nova/openstack/common/db/$sqlite_db</td>
<td>pxe_network_config=False</td>
<td>(BoolOpt) If set, pass the network configuration details to the initramfs via cmdline.</td>
</tr>
<tr>
<td>sql_connection=sqlite:////home/ubuntu/nova/nova/openstack/common/db/$sqlite_db</td>
<td>(StrOpt) The SQLAlchemy connection string used to connect to the database</td>
</tr>
<tr>

@@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for ca</caption>

@@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for cells</caption>
@@ -19,6 +23,14 @@
<td>capabilities=hypervisor=xenserver;kvm,os=linux;windows</td>
<td>(ListOpt) Key/Multi-value list with the capabilities of the cell</td>
</tr>
<tr>
<td>cell_type=None</td>
<td>(StrOpt) Type of cell: api or compute</td>
</tr>
<tr>
<td>cells_config=None</td>
<td>(StrOpt) Configuration file from which to read cells configuration. If given, overrides reading cells from the database.</td>
</tr>
<tr>
<td>driver=nova.virt.baremetal.pxe.PXE</td>
<td>(StrOpt) Baremetal driver back-end (pxe or tilera)</td>
@@ -51,6 +63,18 @@
<td>max_hop_count=10</td>
<td>(IntOpt) Maximum number of hops for cells routing.</td>
</tr>
<tr>
<td>mute_child_interval=300</td>
<td>(IntOpt) Number of seconds after which a lack of capability and capacity updates signals the child cell is to be treated as a mute.</td>
</tr>
<tr>
<td>mute_weight_multiplier=-10.0</td>
<td>(FloatOpt) Multiplier used to weigh mute children. (The value should be negative.)</td>
</tr>
<tr>
<td>mute_weight_value=1000.0</td>
<td>(FloatOpt) Weight value assigned to mute children. (The value should be positive.)</td>
</tr>
<tr>
<td>name=nova</td>
<td>(StrOpt) name of this cell</td>
@@ -59,10 +83,6 @@
<td>reserve_percent=10.0</td>
<td>(FloatOpt) Percentage of cell capacity to hold in reserve. Affects both memory and disk utilization</td>
</tr>
<tr>
<td>scheduler=nova.cells.scheduler.CellsScheduler</td>
<td>(StrOpt) Cells scheduler to use</td>
</tr>
<tr>
<td>topic=cells</td>
<td>(StrOpt) the topic cells nodes listen on</td>

@@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for common</caption>
@@ -12,7 +16,7 @@
</thead>
<tbody>
<tr>
<td>bindir=$pybasedir/bin</td>
<td>bindir=/usr/local/bin</td>
<td>(StrOpt) Directory where nova binaries are installed</td>
</tr>
<tr>
@@ -32,9 +36,13 @@
<td>(BoolOpt) Whether to disable inter-process locks</td>
</tr>
<tr>
<td>host=usagi</td>
<td>host=docwork</td>
<td>(StrOpt) Name of this node. This can be an opaque identifier. It is not necessarily a hostname, FQDN, or IP address. However, the node name must be valid within an AMQP key, and if using ZeroMQ, a valid hostname, FQDN, or IP address</td>
</tr>
<tr>
<td>host=127.0.0.1</td>
<td>(StrOpt) Host to locate redis</td>
</tr>
<tr>
<td>lock_path=None</td>
<td>(StrOpt) Directory to use for lock files. Default to a temp directory</td>
@@ -44,7 +52,7 @@
<td>(ListOpt) Memcached servers or None for in process cache.</td>
</tr>
<tr>
<td>my_ip=192.168.1.32</td>
<td>my_ip=115.146.84.189</td>
<td>(StrOpt) ip address of this host</td>
</tr>
<tr>
@@ -59,16 +67,12 @@
<td>notify_api_faults=False</td>
<td>(BoolOpt) If set, send api.fault notifications on caught exceptions in the API service.</td>
</tr>
<tr>
<td>notify_on_any_change=False</td>
<td>(BoolOpt) If set, send compute.instance.update notifications on instance state changes. Valid values are False for no notifications, True for notifications on any instance changes.</td>
</tr>
<tr>
<td>notify_on_state_change=None</td>
<td>(StrOpt) If set, send compute.instance.update notifications on instance state changes. Valid values are None for no notifications, "vm_state" for notifications on VM state changes, or "vm_and_task_state" for notifications on VM and task state changes.</td>
</tr>
<tr>
<td>pybasedir=/home/fifieldt/temp/nova</td>
<td>pybasedir=/home/ubuntu/nova</td>
<td>(StrOpt) Directory where the nova python module is installed</td>
</tr>
<tr>

@@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for compute</caption>
@@ -21,7 +25,7 @@
</tr>
<tr>
<td>compute_api_class=nova.compute.api.API</td>
<td>(StrOpt) The full class name of the compute API class to use</td>
<td>(StrOpt) The full class name of the compute API class to use (deprecated)</td>
</tr>
<tr>
<td>compute_driver=None</td>
@@ -36,7 +40,7 @@
<td>(StrOpt) Class that will manage stats for the local compute host</td>
</tr>
<tr>
<td>console_host=usagi</td>
<td>console_host=docwork</td>
<td>(StrOpt) Console proxy host to use to connect to instances on this host.</td>
</tr>
<tr>
@@ -44,15 +48,15 @@
<td>(StrOpt) full class name for the Manager for console proxy</td>
</tr>
<tr>
<td>default_instance_type=m1.small</td>
<td>(StrOpt) default instance type to use, testing only</td>
<td>default_flavor=m1.small</td>
<td>(StrOpt) default flavor to use, testing only</td>
</tr>
<tr>
<td>default_notification_level=INFO</td>
<td>(StrOpt) Default notification level for outgoing notifications</td>
</tr>
<tr>
<td>default_publisher_id=$host</td>
<td>default_publisher_id=None</td>
<td>(StrOpt) Default publisher_id for outgoing notifications</td>
</tr>
<tr>
@ -119,6 +123,18 @@
<td>running_deleted_instance_timeout=0</td>
<td>(IntOpt) Number of seconds after being deleted when a running instance should be considered eligible for cleanup.</td>
</tr>
<tr>
<td>shelved_offload_time=0</td>
<td>(IntOpt) Time in seconds before a shelved instance is eligible for removing from a host. -1 never offload, 0 offload when shelved</td>
</tr>
<tr>
<td>shelved_poll_interval=3600</td>
<td>(IntOpt) Interval in seconds for polling shelved instances to offload</td>
</tr>
<tr>
<td>sync_power_state_interval=600</td>
<td>(IntOpt) interval to sync power states between the database and the hypervisor</td>
</tr>
</tbody>
</table>
</para>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for conductor</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for configdrive</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for console</caption>
@ -12,7 +16,7 @@
</thead>
<tbody>
<tr>
<td>console_public_hostname=usagi</td>
<td>console_public_hostname=docwork</td>
<td>(StrOpt) Publicly visible name for this console host</td>
</tr>
<tr>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for db</caption>
@ -32,7 +36,7 @@
<td>(BoolOpt) Enable the experimental use of thread pooling for all DB API calls</td>
</tr>
<tr>
<td>sql_connection=sqlite:////home/fifieldt/temp/nova/nova/openstack/common/db/$sqlite_db</td>
<td>sql_connection=sqlite:////home/ubuntu/nova/nova/openstack/common/db/$sqlite_db</td>
<td>(StrOpt) The SQLAlchemy connection string used to connect to the database</td>
</tr>
<tr>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for ec2</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for fping</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for glance</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for hyperv</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for hypervisor</caption>
@ -67,6 +71,10 @@
<td>libvirt_inject_password=False</td>
<td>(BoolOpt) Inject the admin password at boot time, without an agent.</td>
</tr>
<tr>
<td>libvirt_iscsi_use_multipath=False</td>
<td>(BoolOpt) use multipath connection of the iSCSI volume</td>
</tr>
<tr>
<td>libvirt_lvm_snapshot_size=1000</td>
<td>(IntOpt) The amount of storage (in megabytes) to allocate for LVM snapshot copy-on-write blocks.</td>
@ -75,6 +83,10 @@
<td>libvirt_nonblocking=True</td>
<td>(BoolOpt) Use a separated OS thread pool to realize non-blocking libvirt calls</td>
</tr>
<tr>
<td>libvirt_ovs_bridge=br-int</td>
<td>(StrOpt) Name of Integration Bridge used by Open vSwitch</td>
</tr>
<tr>
<td>libvirt_snapshot_compression=False</td>
<td>(BoolOpt) Compress snapshot images when possible. This currently applies exclusively to qcow2 images</td>
@ -95,6 +107,10 @@
<td>libvirt_uri=</td>
<td>(StrOpt) Override the default libvirt URI (which is dependent on libvirt_type)</td>
</tr>
<tr>
<td>libvirt_use_virtio_for_bridges=True</td>
<td>(BoolOpt) Use virtio for bridge interfaces with KVM/QEMU</td>
</tr>
<tr>
<td>libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver</td>
<td>(StrOpt) The libvirt VIF driver to configure the VIFs.</td>
@ -159,6 +175,10 @@
<td>use_usb_tablet=True</td>
<td>(BoolOpt) Sync virtual and real mouse cursors in Windows VMs</td>
</tr>
<tr>
<td>vcpu_pin_set=None</td>
<td>(StrOpt) Which pcpus can be used by vcpus of instance e.g: "4-12,^8,15"</td>
</tr>
<tr>
<td>virt_mkfs=['default=mkfs.ext3 -L %(fs_label)s -F %(target)s', 'linux=mkfs.ext3 -L %(fs_label)s -F %(target)s', 'windows=mkfs.ntfs --force --fast --label %(fs_label)s %(target)s']</td>
<td>(MultiStrOpt) mkfs commands for ephemeral device. The format is &lt;os_type&gt;=&lt;mkfs command&gt;</td>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for ipv6</caption>


@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for keymgr</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>keymgr_api_class=nova.keymgr.key_mgr.KeyManager</td>
<td>(StrOpt) The full class name of the key manager API class</td>
</tr>
</tbody>
</table>
</para>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for kombu</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for ldap</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for livemigration</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for logging</caption>
@ -52,15 +56,11 @@
<td>(StrOpt) (Optional) Name of log file to output to. If no default is set, logging will go to stdout.</td>
</tr>
<tr>
<td>log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s</td>
<td>(StrOpt) A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. Default: %(default)s</td>
<td>log_format=None</td>
<td>(StrOpt) DEPRECATED. A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. This option is deprecated. Please use logging_context_format_string and logging_default_format_string instead.</td>
</tr>
<tr>
<td>logfile_mode=0644</td>
<td>(StrOpt) Default file mode used when creating log files</td>
</tr>
<tr>
<td>logging_context_format_string=%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s</td>
<td>logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s</td>
<td>(StrOpt) format string to use for log messages with context</td>
</tr>
<tr>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for metadata</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for network</caption>
@ -167,6 +171,10 @@
<td>multi_host=False</td>
<td>(BoolOpt) Default value for multi_host in networks. Also, if set, some rpc network calls will be sent directly to host.</td>
</tr>
<tr>
<td>network_allocate_retries=0</td>
<td>(IntOpt) Number of times to retry network allocation on failures</td>
</tr>
<tr>
<td>network_api_class=nova.network.api.API</td>
<td>(StrOpt) The full class name of the network API class to use</td>
@ -211,10 +219,6 @@
<td>security_group_api=nova</td>
<td>(StrOpt) The full class name of the security API class</td>
</tr>
<tr>
<td>security_group_handler=nova.network.sg.NullSecurityGroupHandler</td>
<td>(StrOpt) The full class name of the security group handler class</td>
</tr>
<tr>
<td>send_arp_for_ha=False</td>
<td>(BoolOpt) send gratuitous ARPs for HA setup</td>
@ -223,10 +227,6 @@
<td>send_arp_for_ha_count=3</td>
<td>(IntOpt) send this many gratuitous ARPs for HA setup</td>
</tr>
<tr>
<td>service_quantum_metadata_proxy=False</td>
<td>(BoolOpt) Set flag to indicate Quantum will proxy metadata requests and resolve instance ids.</td>
</tr>
<tr>
<td>share_dhcp_address=False</td>
<td>(BoolOpt) If True in multi_host mode, all compute hosts share the same dhcp address.</td>
@ -244,7 +244,7 @@
<td>(BoolOpt) if set, uses the dns1 and dns2 from the network ref.as dns servers.</td>
</tr>
<tr>
<td>use_quantum_default_nets=False</td>
<td>use_neutron_default_nets=False</td>
<td>(StrOpt) Control for checking for default networks</td>
</tr>
<tr>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for periodic</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for policy</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for powervm</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for qpid</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for quota</caption>
@ -15,6 +19,10 @@
<td>bandwidth_poll_interval=600</td>
<td>(IntOpt) interval to pull bandwidth usage info</td>
</tr>
<tr>
<td>bandwidth_update_interval=600</td>
<td>(IntOpt) Seconds between bandwidth updates for cells.</td>
</tr>
<tr>
<td>enable_network_quota=False</td>
<td>(BoolOpt) Enables or disables quotaing of tenant networks</td>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for rabbitmq</caption>


@ -0,0 +1,36 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for redis</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>host=docwork</td>
<td>(StrOpt) Name of this node. This can be an opaque identifier. It is not necessarily a hostname, FQDN, or IP address. However, the node name must be valid within an AMQP key, and if using ZeroMQ, a valid hostname, FQDN, or IP address</td>
</tr>
<tr>
<td>host=127.0.0.1</td>
<td>(StrOpt) Host to locate redis</td>
</tr>
<tr>
<td>password=None</td>
<td>(StrOpt) Password for Redis server. (optional)</td>
</tr>
<tr>
<td>port=6379</td>
<td>(IntOpt) Use this port to connect to redis host.</td>
</tr>
</tbody>
</table>
</para>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for rpc</caption>
@ -12,8 +16,8 @@
</thead>
<tbody>
<tr>
<td>amqp_rpc_single_reply_queue=False</td>
<td>(BoolOpt) Enable a fast single reply queue if using AMQP based RPC like RabbitMQ or Qpid.</td>
<td>baseapi=None</td>
<td>(StrOpt) Set a version cap for messages sent to the base api in any service</td>
</tr>
<tr>
<td>control_exchange=openstack</td>
@ -28,7 +32,7 @@
<td>(IntOpt) Heartbeat time-to-live.</td>
</tr>
<tr>
<td>matchmaker_ringfile=/etc/nova/matchmaker_ring.json</td>
<td>ringfile=/etc/oslo/matchmaker_ring.json</td>
<td>(StrOpt) Matchmaker ring file (JSON)</td>
</tr>
<tr>
@ -55,6 +59,10 @@
<td>rpc_thread_pool_size=64</td>
<td>(IntOpt) Size of RPC thread pool</td>
</tr>
<tr>
<td>topics=notifications</td>
<td>(ListOpt) AMQP topic(s) used for openstack notifications</td>
</tr>
</tbody>
</table>
</para>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for s3</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for scheduling</caption>
@ -13,7 +17,7 @@
<tbody>
<tr>
<td>cpu_allocation_ratio=16.0</td>
<td>(FloatOpt) Virtual CPU to Physical CPU allocation ratio</td>
<td>(FloatOpt) Virtual CPU to physical CPU allocation ratio which affects all CPU filters. This configuration specifies a global ratio for CoreFilter. For AggregateCoreFilter, it will fall back to this configuration value if no per-aggregate setting found.</td>
</tr>
<tr>
<td>disk_allocation_ratio=1.0</td>
@ -37,7 +41,11 @@
</tr>
<tr>
<td>ram_allocation_ratio=1.5</td>
<td>(FloatOpt) virtual ram to physical ram allocation ratio</td>
<td>(FloatOpt) Virtual ram to physical ram allocation ratio which affects all ram filters. This configuration specifies a global ratio for RamFilter. For AggregateRamFilter, it will fall back to this configuration value if no per-aggregate setting found.</td>
</tr>
<tr>
<td>ram_weight_multiplier=10.0</td>
<td>(FloatOpt) Multiplier used for weighing ram. Negative numbers mean to stack vs spread.</td>
</tr>
<tr>
<td>ram_weight_multiplier=1.0</td>
@ -63,6 +71,10 @@
<td>scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler</td>
<td>(StrOpt) Default driver to use for the scheduler</td>
</tr>
<tr>
<td>scheduler_filter_classes=nova.cells.filters.all_filters</td>
<td>(ListOpt) Filter classes the cells scheduler should use. An entry of "nova.cells.filters.all_filters"maps to all cells filters included with nova.</td>
</tr>
<tr>
<td>scheduler_host_manager=nova.scheduler.host_manager.HostManager</td>
<td>(StrOpt) The scheduler host manager class to use</td>
@ -95,6 +107,10 @@
<td>scheduler_topic=scheduler</td>
<td>(StrOpt) the topic scheduler nodes listen on</td>
</tr>
<tr>
<td>scheduler_weight_classes=nova.cells.weights.all_weighers</td>
<td>(ListOpt) Weigher classes the cells scheduler should use. An entry of "nova.cells.weights.all_weighers"maps to all cell weighers included with nova.</td>
</tr>
<tr>
<td>scheduler_weight_classes=nova.scheduler.weights.all_weighers</td>
<td>(ListOpt) Which weight class names to use for weighing hosts</td>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for spice</caption>
@ -20,8 +24,12 @@
<td>(BoolOpt) enable spice related features</td>
</tr>
<tr>
<td>html5proxy_base_url=http://127.0.0.1:6080/spice_auto.html</td>
<td>(StrOpt) location of spice html5 console proxy, in the form "http://127.0.0.1:6080/spice_auto.html"</td>
<td>enabled=False</td>
<td>(BoolOpt) Whether the V3 API is enabled or not</td>
</tr>
<tr>
<td>html5proxy_base_url=http://127.0.0.1:6082/spice_auto.html</td>
<td>(StrOpt) location of spice html5 console proxy, in the form "http://127.0.0.1:6082/spice_auto.html"</td>
</tr>
<tr>
<td>keymap=en-us</td>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for testing</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for tilera</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for trustedcomputing</caption>


@ -0,0 +1,60 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for upgrade_levels</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>cells=None</td>
<td>(StrOpt) Set a version cap for messages sent to local cells services</td>
</tr>
<tr>
<td>cert=None</td>
<td>(StrOpt) Set a version cap for messages sent to cert services</td>
</tr>
<tr>
<td>compute=None</td>
<td>(StrOpt) Set a version cap for messages sent to compute services</td>
</tr>
<tr>
<td>conductor=None</td>
<td>(StrOpt) Set a version cap for messages sent to conductor services</td>
</tr>
<tr>
<td>console=None</td>
<td>(StrOpt) Set a version cap for messages sent to console services</td>
</tr>
<tr>
<td>consoleauth=None</td>
<td>(StrOpt) Set a version cap for messages sent to consoleauth services</td>
</tr>
<tr>
<td>intercell=None</td>
<td>(StrOpt) Set a version cap for messages sent between cells services</td>
</tr>
<tr>
<td>network=None</td>
<td>(StrOpt) Set a version cap for messages sent to network services</td>
</tr>
<tr>
<td>scheduler=None</td>
<td>(StrOpt) Set a version cap for messages sent to scheduler services</td>
</tr>
<tr>
<td>scheduler=nova.cells.scheduler.CellsScheduler</td>
<td>(StrOpt) Cells scheduler to use</td>
</tr>
</tbody>
</table>
</para>


@ -0,0 +1,28 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for v3api</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>enabled=False</td>
<td>(BoolOpt) enable spice related features</td>
</tr>
<tr>
<td>enabled=False</td>
<td>(BoolOpt) Whether the V3 API is enabled or not</td>
</tr>
</tbody>
</table>
</para>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for vmware</caption>
@ -49,7 +53,7 @@
</tr>
<tr>
<td>vmwareapi_wsdl_loc=None</td>
<td>(StrOpt) VIM Service WSDL Location e.g http://&lt;server&gt;/vimService.wsdl. Due to a bug in vSphere ESX 4.1 default wsdl. Refer readme-vmware to setup</td>
<td>(StrOpt) Optional VIM Service WSDL Location e.g http://&lt;server&gt;/vimService.wsdl. Optional over-ride to default location for bug work-arounds</td>
</tr>
</tbody>
</table>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for vnc</caption>


@ -0,0 +1,36 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for volume</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<td>Configuration option=Default value</td>
<td>(Type) Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>rbd_secret_uuid=None</td>
<td>(StrOpt) the libvirt uuid of the secret for the rbd_uservolumes</td>
</tr>
<tr>
<td>rbd_user=None</td>
<td>(StrOpt) the RADOS client name for accessing rbd volumes</td>
</tr>
<tr>
<td>scality_sofs_config=None</td>
<td>(StrOpt) Path or URL to Scality SOFS configuration file</td>
</tr>
<tr>
<td>scality_sofs_mount_point=$state_path/scality</td>
<td>(StrOpt) Base dir where Scality SOFS shall be mounted</td>
</tr>
</tbody>
</table>
</para>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for volumes</caption>
@ -19,6 +23,10 @@
<td>cinder_api_insecure=False</td>
<td>(BoolOpt) Allow to perform insecure SSL requests to cinder</td>
</tr>
<tr>
<td>cinder_ca_certificates_file=None</td>
<td>(StrOpt) Location of ca certicates file to use for cinder client requests.</td>
</tr>
<tr>
<td>cinder_catalog_info=volume:cinder:publicURL</td>
<td>(StrOpt) Info to match when looking for cinder in the service catalog. Format is : separated values of the form: &lt;service_type&gt;:&lt;service_name&gt;:&lt;endpoint_type&gt;</td>
@ -39,10 +47,30 @@
<td>force_volumeutils_v1=False</td>
<td>(BoolOpt) Force volumeutils v1</td>
</tr>
<tr>
<td>glusterfs_mount_point_base=$state_path/mnt</td>
<td>(StrOpt) Dir where the glusterfs volume is mounted on the compute node</td>
</tr>
<tr>
<td>iscsi_iqn_prefix=iqn.2010-10.org.openstack.baremetal</td>
<td>(StrOpt) iSCSI IQN prefix used in baremetal volume connections.</td>
</tr>
<tr>
<td>nfs_mount_options=None</td>
<td>(StrOpt) Mount options passed to the nfs client. See section of the nfs man page for details</td>
</tr>
<tr>
<td>nfs_mount_point_base=$state_path/mnt</td>
<td>(StrOpt) Dir where the nfs volume is mounted on the compute node</td>
</tr>
<tr>
<td>num_aoe_discover_tries=3</td>
<td>(IntOpt) number of times to rediscover AoE target to find volume</td>
</tr>
<tr>
<td>num_iscsi_scan_tries=3</td>
<td>(IntOpt) number of times to rescan iSCSI target to find volume</td>
</tr>
<tr>
<td>os_region_name=None</td>
<td>(StrOpt) region name of this node</td>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for vpn</caption>
@ -28,12 +32,12 @@
<td>(StrOpt) Network to push into openvpn config</td>
</tr>
<tr>
<td>vpn_image_id=0</td>
<td>(StrOpt) image id used when starting up a cloudpipe vpn server</td>
<td>vpn_flavor=m1.tiny</td>
<td>(StrOpt) Flavor for vpn instances</td>
</tr>
<tr>
<td>vpn_instance_type=m1.tiny</td>
<td>(StrOpt) Instance type for vpn instances</td>
<td>vpn_image_id=0</td>
<td>(StrOpt) image id used when starting up a cloudpipe vpn server</td>
</tr>
<tr>
<td>vpn_ip=$my_ip</td>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for wsgi</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for xen</caption>
@ -121,7 +125,7 @@
</tr>
<tr>
<td>xenapi_disable_agent=False</td>
<td>(BoolOpt) Disable XenAPI agent. Reduces the amount of time it takes nova to detect that a VM has started, when that VM does not have the agent installed</td>
<td>(BoolOpt) Disables the use of the XenAPI agent in any image regardless of what image properties are present.</td>
</tr>
<tr>
<td>xenapi_image_upload_handler=nova.virt.xenapi.imageupload.glance.GlanceStore</td>
@ -195,6 +199,10 @@
<td>xenapi_torrent_seed_duration=3600</td>
<td>(IntOpt) Number of seconds after downloading an image via BitTorrent that it should be seeded for other peers.</td>
</tr>
<tr>
<td>xenapi_use_agent_default=False</td>
<td>(BoolOpt) Determines if the xenapi agent should be used when the image used does not contain a hint to declare if the agent is present or not. The hint is a glance property "xenapi_use_agent" that has the value "true" or "false". Note that waiting for the agent when it is not present will significantly increase server boot times.</td>
</tr>
<tr>
<td>xenapi_vhd_coalesce_max_attempts=5</td>
<td>(IntOpt) Max number of times to poll for VHD to coalesce. Used only if compute_driver=xenapi.XenAPIDriver</td>
@ -209,4 +217,4 @@
</tr>
</tbody>
</table>
</para>
</para>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for xvpnvncproxy</caption>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for zeromq</caption>
@ -20,7 +24,7 @@
<td>(IntOpt) Number of ZeroMQ contexts, defaults to 1</td>
</tr>
<tr>
<td>rpc_zmq_host=usagi</td>
<td>rpc_zmq_host=docwork</td>
<td>(StrOpt) Name of this node. Must be a valid hostname, FQDN, or IP address. Must match "host" option, if running Nova.</td>
</tr>
<tr>


@ -1,4 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for zookeeper</caption>


@ -0,0 +1,9 @@
*.DS_Store
*.egg*
*.log
*.mo
*.pyc
*.swo
*.swp
*.sqlite
*~


@ -0,0 +1,105 @@
autogenerate-config-docs
========================
Automatically generate configuration tables to document OpenStack.
Dependencies: python-git (version: 0.3.2 RC1), oslo.config
Setting up your environment
---------------------------
Note: This tool is best run in a fresh VM environment, as running it
requires installing the dependencies of the particular OpenStack
product you are working with. Installing all of that on your normal
machine could leave you with a bunch of cruft!
First install git and python-pip,
$ sudo apt-get install git python-pip
next, install oslo.config and GitPython
$ sudo pip install oslo.config GitPython
then, check out the repository you are working with:
$ git clone https://github.com/openstack/nova.git
(this guide makes reference to a /repos directory, so you should
record the directory you are using and replace as appropriate below)
and the tool itself:
$ git clone https://github.com/openstack/openstack-manuals.git
and finally, the dependencies for the product you are working with:
$ sudo pip install -r nova/requirements.txt
Now you are ready to use the tool.
Using the tool
--------------
This tool is divided into three parts:
1) Extraction of flag names
eg
$ ./autohelp.py --action create -i flagmappings/nova.flagmappings -o names --path /repos/nova
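This writes one flag name per line to the file passed with -i, ready to have groups added in the next step. For example (option names shown are illustrative):
$ head flagmappings/nova.flagmappings
allow\_instance\_snapshots
allow\_resize\_to\_same\_host
allow\_same\_net\_traffic
...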
2) Grouping of flags
This is currently done manually: edit the flag name file and place
a category after each flag name, separated by a space.
eg
$ head flagmappings/glance.flagmappings
admin\_password registry
admin\_role api
admin\_tenant\_name registry
admin\_user registry
...
3) Creation of docbook-formatted configuration table files
eg
$ ./autohelp.py --action create -i flagmappings/nova.flagmappings -o docbook --path /repos/nova
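This writes one table per group into the current directory, named after the package and group (&lt;package&gt;-&lt;group&gt;.xml). After running against nova you should end up with files such as:
$ ls nova-*.xml
nova-api.xml  nova-common.xml  nova-compute.xml  ...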
A worked example - updating the docs for H2
----------------------------------------------------
Updating the automatically generated tables, from scratch:
$ sudo apt-get update
$ sudo apt-get install git python-pip python-dev
$ sudo pip install git-review GitPython
$ git clone git://github.com/openstack/openstack-manuals.git
$ cd openstack-manuals/
$ git review -d 35726
$ cd tools/autogenerate-config-docs/
Now, clone and install the requirements for nova, glance and quantum:
$ for i in nova glance quantum; do git clone git://github.com/openstack/$i.git; done
$ for i in nova glance quantum; do sudo pip install -r $i/requirements.txt; done
This missed some requirements for nova, which were fixed by:
$ sudo pip install python-glanceclient websockify pyasn1 python-cinderclient error\_util
$ sudo apt-get install python-ldap python-lxml
Making the flag names update
./autohelp.py -vvv --action update -i flagmappings/nova.flagmappings -o names --path ~/nova | more
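This prints the flags that were removed and added since the mappings file was last updated, and writes flagmappings/nova.flagmappings.new with each newly added option marked as Unknown. The output looks something like this (flag names are illustrative):
Removed Flags
security\_group\_handler
...
Added Flags
network\_allocate\_retries
shelved\_offload\_time
...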
At this point, search through flagmappings/nova.flagmappings.new for anything labelled Unknown and fix it.
Once that is done, use:
./autohelp.py -vvv --action create -i flagmappings/nova.flagmappings -o docbook --path ~/nova
to generate the XML files, then move them into the appropriate part of the git repo.
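A minimal sketch of that final step, assuming the generated tables belong under doc/common/tables/ in the openstack-manuals checkout (the destination directory is an assumption; adjust it to wherever the tables live in your branch):
$ mv nova-*.xml ../../doc/common/tables/    # destination path is an assumption
$ cd ../..
$ git add doc/common/tables/nova-*.xml
$ git review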


@ -0,0 +1,65 @@
#!/usr/bin/env python
#
# A collection of tools for working with flags from OpenStack
# packages and documentation.
#
# For an example of usage, run this program with the -h switch.
#
import os
import sys
# this is for the internationalisation function in gettext
import __builtin__
__builtin__.__dict__['_'] = lambda x: x
import common
def main(action, file, format, repo, verbose=0, name=False, test=False):
package_name = common.git_check(repo)
sys.path.append(repo)
try:
__import__(package_name)
except ImportError as e:
if verbose >= 1:
print str(e)
print "Failed to import: %s (%s)" % (package_name, e)
if verbose >= 1:
flags = common.extract_flags(repo, package_name, verbose)
else:
flags = common.extract_flags(repo, package_name)
print "%s flags imported from package %s." % (len(flags),
str(package_name))
if action == "update":
common.update(file, flags, True, verbose)
return
if format == "names":
if verbose >= 1:
common.write_flags(file, flags, True, verbose)
else:
common.write_flags(file, flags, True)
if format == "docbook":
groups = common.populate_groups(file)
print "%s groups" % len(groups)
if verbose >= 1:
common.write_docbook('.', flags, groups, package_name, verbose)
else:
common.write_docbook('.', flags, groups, package_name)
sys.exit(0)
if __name__ == "__main__":
args = common.parse_me_args()
main(args['action'],
args['file'],
args['format'],
args['repo'],
args['verbose'],
args['name'],
args['test'])


@ -0,0 +1,400 @@
#
# A collection of shared functions for managing help flag mapping files.
#
import os
import sys
import pkgutil
import glob
from collections import defaultdict
from xml.sax.saxutils import escape
from oslo.config import cfg
# gettext internationalisation function requisite:
import __builtin__
__builtin__.__dict__['_'] = lambda x: x
def git_check(repo_path):
"""
Check a passed directory to verify it is a valid git repository.
"""
from git import Repo
try:
repo = Repo(repo_path)
assert repo.bare is False
package_name = os.path.basename(repo.remotes.origin.url).rstrip('.git')
except:
print "\nThere is a problem verifying that the directory passed in"
print "is a valid git repoistory. Please try again.\n"
sys.exit(1)
return package_name
def populate_groups(filepath):
"""
Takes a file formatted with lines of config option and group
separated by a space and constructs a dictionary indexed by
group, which is returned.
"""
groups = defaultdict(list)
groups_file = open(os.path.expanduser(filepath), 'r')
for line in groups_file:
try:
option, group = line.split(None, 1)
except ValueError:
print "Couldn't read groups file line:%s" % line
print "Check for formatting errors - did you add the group?"
sys.exit(1)
groups[group.strip()].append(option)
return groups
def extract_flags(repo_location, module_name, verbose=0, names_only=True):
"""
Loops through the repository, importing module by module to
populate the configuration object (cfg.CONF) created from Oslo.
"""
usable_dirs = []
module_location = os.path.dirname(repo_location + '/' + module_name)
for root, dirs, files in os.walk(module_location + '/' + module_name):
for name in dirs:
abs_path = os.path.join(root.split(module_location)[1][1:], name)
if ('/tests' not in abs_path and '/locale' not in abs_path and
'/cmd' not in abs_path and '/db/migration' not in abs_path and
'/transfer' not in abs_path):
usable_dirs.append(os.path.join(root.split(module_location)[1][1:], name))
for directory in usable_dirs:
for python_file in glob.glob(module_location + '/' + directory + "/*.py"):
if '__init__' not in python_file:
usable_dirs.append(os.path.splitext(python_file)[0][len(module_location) + 1:])
package_name = directory.replace('/', '.')
try:
__import__(package_name)
if verbose >= 1:
print "imported %s" % package_name
except ImportError as e:
"""
work around modules that don't like being imported in this way
FIXME This could probably be better, but does not affect the
configuration options found at this stage
"""
if verbose >= 2:
print str(e)
print "Failed to import: %s (%s)" % (package_name, e)
continue
flags = cfg.CONF._opts.items()
#extract group information
for group in cfg.CONF._groups.keys():
flags = flags + cfg.CONF._groups[group]._opts.items()
flags.sort()
return flags
def extract_flags_test(repo_loc, module, verbose=0):
"""
TEST TEST TEST TEST TEST TEST
TEST TEST TEST TEST TEST TEST
Loops through the repository, importing module by module to
populate the configuration object (cfg.CONF) created from Oslo.
TEST TEST TEST TEST TEST TEST
TEST TEST TEST TEST TEST TEST
"""
flag_data = {}
flag_files = []
usable_dirs = []
module_location = os.path.dirname(repo_loc + '/' + module)
for root, dirs, files in os.walk(module_location + '/' + module):
for name in dirs:
abs_path = os.path.join(root.split(module_location)[1][1:], name)
if ('/tests' not in abs_path and '/locale' not in abs_path and
'/cmd' not in abs_path and '/db/migration' not in abs_path):
usable_dirs.append(os.path.join(root.split(module_location)[1][1:], name))
for directory in usable_dirs:
for python_file in glob.glob(module_location + '/' + directory + "/*.py"):
if '__init__' not in python_file:
usable_dirs.append(os.path.splitext(python_file)[0][len(module_location) + 1:])
package_name = directory.replace('/', '.')
try:
__import__(package_name)
if verbose >= 1:
print "imported %s" % package_name
flag_data[str(package_name)] = sorted(cfg.CONF._opts.items())
except ImportError as e:
"""
work around modules that don't like being imported in this way
FIXME This could probably be better, but does not affect the
configuration options found at this stage
"""
if verbose >= 2:
print str(e)
print "Failed to import: %s (%s)" % (package_name, e)
continue
return flag_data
def write_test(file, repo_dir, pkg_name):
"""
"""
file1 = file + ".test"
flags = extract_flags_test(repo_dir, pkg_name)
with open(file1, 'a+') as f:
f.write("\n")
for filename, flag_info in flags.iteritems():
f.write("\n -- start file name area --\n")
f.write(filename)
f.write("\n -- end file name area --\n")
print "\n -- start file name area --\n"
print filename
print "\n -- end file name area --\n"
print len(flag_info)
for name, value in flag_info:
opt = value['opt']
#print type(opt)
#print opt
#print name
#print value
f.write(name)
f.write("\n")
def write_header(filepath, verbose=0):
"""
Write header to output flag file.
"""
pass
def write_buffer(file, flags, verbose=0):
"""
Write flag data to file. (The header is written with the write_header function.)
"""
pass
#with open(os.path.expanduser(filepath), 'wb') as f:
def write_flags(filepath, flags, name_only=True, verbose=0):
"""
write out the list of flags in the cfg.CONF object to filepath
if name_only is True - write only a list of names, one per line,
otherwise use MediaWiki syntax to write out the full table with
help text and default values.
"""
with open(os.path.expanduser(filepath), 'wb') as f:
if not name_only:
f.write("{|\n") # start table
# print headers
f.write("!")
f.write("!!".join(["name", "default", "description"]))
f.write("\n|-\n")
for name, value in flags:
opt = value['opt']
if not opt.help:
opt.help = "No help text available for this option"
if not name_only:
f.write("|")
f.write("||".join([name,
str(opt.default),
opt.help.replace("\n", " ")]))
f.write("\n|-\n")
else:
f.write(name + "\n")
if not name_only:
f.write("|}\n") # end table
def write_docbook(directory, flags, groups, package_name, verbose=0):
"""
Prints a docbook-formatted table for every group of options.
"""
count = 0
for group in groups.items():
groups_file = open(package_name + '-' + group[0] + '.xml', 'w')
groups_file.write('<?xml version="1.0" encoding="UTF-8"?>\n\
<!-- Warning: Do not edit this file. It is automatically\n\
generated and your changes will be overwritten.\n\
The tool to do so lives in the tools directory of this\n\
repository -->\n\
<para xmlns="http://docbook.org/ns/docbook" version="5.0">\n\
<table rules="all">\n\
<caption>Description of configuration options for ' + group[0] +
'</caption>\n\
<col width="50%"/>\n\
<col width="50%"/>\n\
<thead>\n\
<tr>\n\
<td>Configuration option=Default value</td>\n\
<td>(Type) Description</td>\n\
</tr>\n\
</thead>\n\
<tbody>')
for flag_name in group[1]:
for flag in flags:
if flag[0] == flag_name:
count = count + 1
opt = flag[1]["opt"]
if not opt.help:
opt.help = "No help text available for this option"
if type(opt).__name__ == "ListOpt" and opt.default is not None:
opt.default = ",".join(opt.default)
groups_file.write('\n <tr>\n\
<td>' + flag_name + '=' + str(opt.default) + '</td>\n\
<td>(' + type(opt).__name__ + ') '
+ escape(opt.help) + '</td>\n\
</tr>')
groups_file.write('\n </tbody>\n\
</table>\n\
</para>')
groups_file.close()
def create(flag_file, repo_path):
"""
Create new flag mappings file, containing help information for
the project whose repo location has been passed in at the command line.
"""
# flag_file testing.
#try:
# Test for successful creation of flag_file.
#except:
# If the test(s) fail, exit noting the problem(s).
# repo_path git repo validity testing.
#try:
# Test to be sure the repo_path passed in is a valid directory
# and that directory is a valid existing git repo.
#except:
# If the test(s) fail, exit noting the problem(s).
# get as much help as possible, searching recursively through the
# entire repo source directory tree.
#help_data = get_help(repo_path)
# Write this information to the file.
#write_file(flag_file, help_data)
def update(filepath, flags, name_only=True, verbose=0):
"""
Update flag mappings file, adding or removing entries as needed.
This will update the file content, essentially overriding the data.
The primary difference between create and update is that create will
make a new file, and update will just work with the data that is
already there.
"""
original_flags = []
updated_flags = []
write_flags(filepath + '.new', flags, name_only=True, verbose=0)
original_flag_file = open(filepath)
updated_flag_file = open(filepath + '.new', 'r')
for line in original_flag_file:
original_flags.append(line.split()[0])
for line in updated_flag_file:
updated_flags.append(line.rstrip())
updated_flag_file.close()
removed_flags = set(original_flags) - set(updated_flags)
added_flags = set(updated_flags) - set(original_flags)
print "\nRemoved Flags\n"
for line in sorted(removed_flags):
print line
print "\nAdded Flags\n"
for line in sorted(added_flags):
print line
updated_flag_file = open(filepath + '.new', 'wb')
original_flag_file.seek(0)
for line in original_flag_file:
flag_name = line.split()[0]
if flag_name not in removed_flags:
for added_flag in added_flags:
if flag_name > added_flag:
updated_flag_file.write(added_flag + ' Unknown\n')
added_flags.remove(added_flag)
break
updated_flag_file.write(line)
def verify(flag_file):
"""
Verify flag file contents. No actions are taken.
"""
pass
def usage():
print "\nUsage: %s docbook <groups file> <source loc>" % sys.argv[0]
print "\nGenerate a list of all flags for package in source loc and"\
"\nwrites them in a docbook table format, grouped by the groups"\
"\nin the groups file, one file per group.\n"
print "\n %s names <names file> <source loc>" % sys.argv[0]
print "\nGenerate a list of all flags names for the package in"\
"\nsource loc and writes them to names file, one per line \n"
def parse_me_args():
import argparse
parser = argparse.ArgumentParser(
description='Manage flag files, to aid in updating documentation.',
epilog='Example: %(prog)s -a create -in ./nova.flagfile -fmt docbook\
-p /nova',
usage='%(prog)s [options]')
parser.add_argument('-a', '--action',
choices=['create', 'update', 'verify'],
dest='action',
help='action (create, update, verify) [REQUIRED]',
required=True,
type=str,)
# trying str data type... instead of file.
parser.add_argument('-i', '-in', '--input',
dest='file',
help='flag file being worked with [REQUIRED]',
required=True,
type=str,)
parser.add_argument('-f', '-fmt', '--format', '-o', '-out',
dest='format',
help='file output format (options: docbook, names)',
required=False,
type=str,)
# ..tried having 'dir' here for the type, but the git.Repo function
# requires a string is passed to it.. a directory won't work.
parser.add_argument('-p', '--path',
dest='repo',
help='path to valid git repository [REQUIRED]',
required=True,
type=str,)
parser.add_argument('-v', '--verbose',
action='count',
default=0,
dest='verbose',
required=False,)
parser.add_argument('-no', '--name_only',
action='store_true',
dest='name',
help='whether output should contain names only',
required=False,)
parser.add_argument('-test',
action='store_true',
dest='test',
help=argparse.SUPPRESS,
required=False,)
args = vars(parser.parse_args())
return args


@ -0,0 +1,142 @@
admin_password keystone_authtoken
admin_tenant_name keystone_authtoken
admin_token keystone_authtoken
admin_user keystone_authtoken
allowed_rpc_exception_modules
auth_admin_prefix
auth_host keystone_authtoken
auth_port keystone_authtoken
auth_protocol keystone_authtoken
auth_strategy DEFAULT
auth_uri keystone_authtoken
auth_version keystone_authtoken
backdoor_port DEFAULT
backend database
cache
certfile keystone_authtoken
cinder_control_exchange DEFAULT
connection database
connection_debug database
connection_trace database
control_exchange DEFAULT
counter_source DEFAULT
database_connection DEFAULT
debug logging
default_log_levels logging
default_notification_level logging
default_publisher_id DEFAULT
delay_auth_decision
disable_process_locking
enable_v1_api DEFAULT
fake_rabbit
fatal_deprecations
glance_control_exchange DEFAULT
host
host
host
http_connect_timeout
http_handler
hypervisor_inspector
idle_timeout
instance_format
instance_uuid_format
keyfile keystone_authtoken
kombu_ssl_ca_certs
kombu_ssl_certfile
kombu_ssl_keyfile
kombu_ssl_version
libvirt_type
libvirt_uri
list_notifier_drivers
lock_path
log_config logging
log_date_format logging
log_dir logging
log_file logging
log_format logging
logging_context_format_string logging
logging_debug_format_suffix logging
logging_default_format_string logging
logging_exception_prefix logging
matchmaker_heartbeat_freq
matchmaker_heartbeat_ttl
max_overflow
max_pool_size
max_retries
memcache_secret_key
memcache_security_strategy
memcache_servers
memcached_servers
metering_secret DEFAULT
metering_topic DEFAULT
min_pool_size database
mysql_engine database
notification_driver
notification_topics
nova_control_exchange DEFAULT
os_auth_url service_credentials
os_password service_credentials
os_tenant_id service_credentials
os_tenant_name service_credentials
os_username service_credentials
password
pipeline_cfg_file
policy_default_rule
policy_file
pool_timeout
port
port
publish_errors
qpid_heartbeat qpid
qpid_hostname qpid
qpid_hosts qpid
qpid_password qpid
qpid_port qpid
qpid_protocol qpid
qpid_sasl_mechanisms qpid
qpid_tcp_nodelay qpid
qpid_username qpid
quantum_control_exchange DEFAULT
rabbit_durable_queues rabbitmq
rabbit_ha_queues rabbitmq
rabbit_host rabbitmq
rabbit_hosts rabbitmq
rabbit_max_retries rabbitmq
rabbit_password rabbitmq
rabbit_port rabbitmq
rabbit_retry_backoff rabbitmq
rabbit_retry_interval rabbitmq
rabbit_use_ssl rabbitmq
rabbit_userid rabbitmq
rabbit_virtual_host rabbitmq
reseller_prefix DEFAULT
reserved_metadata_length
reserved_metadata_namespace
retry_interval
revocation_cache_time
ringfile
rpc_backend rpc
rpc_cast_timeout rpc
rpc_conn_pool_size rpc
rpc_response_timeout rpc
rpc_thread_pool_size rpc
rpc_zmq_bind_address rpc
rpc_zmq_contexts rpc
rpc_zmq_host rpc
rpc_zmq_ipc_dir rpc
rpc_zmq_matchmaker rpc
rpc_zmq_port rpc
rpc_zmq_topic_backlog rpc
signing_dir keystone_authtoken
slave_connection
sqlite_db database
sqlite_synchronous database
syslog_log_facility
token_cache_time
topics
udp_address
udp_port
use_stderr
use_syslog
use_tpool database
verbose logging


@ -0,0 +1,297 @@
allowed_rpc_exception_modules rpc
amqp_rpc_single_reply_queue rpc
api_paste_config api
api_rate_limit api
auth_strategy auth
available_devices storage
backdoor_port api
backend storage
backlog log
backup_api_class backups
backup_ceph_chunk_size backups
backup_ceph_conf backups
backup_ceph_pool backups
backup_ceph_stripe_count backups
backup_ceph_stripe_unit backups
backup_ceph_user backups
backup_driver backups
backup_manager backups
backup_name_template backups
backup_topic backups
bindir storage
capacity_weight_multiplier storage
cinder_huawei_conf_file storage
cloned_volume_same_az zones
connection connection
connection_debug connection
connection_trace connection
connection_type connection
control_exchange rpc
coraid_esm_address storage
coraid_group storage
coraid_password storage
coraid_repository_key storage
coraid_user storage
db_backend database
db_driver database
debug common
default_log_levels common
default_notification_level common
default_publisher_id common
default_volume_type common
disable_process_locking common
enable_new_services common
enable_v1_api api
enable_v2_api api
enabled_backends storage
fake_rabbit rpc
fatal_deprecations common
fatal_exception_format_errors common
glance_api_insecure images
glance_api_servers images
glance_api_ssl_compression images
glance_api_version images
glance_host images
glance_num_retries images
glance_port images
glusterfs_disk_util storage
glusterfs_mount_point_base storage
glusterfs_shares_config storage
glusterfs_sparsed_volumes storage
gpfs_images_dir images
gpfs_images_share_mode images
gpfs_max_clone_depth images
gpfs_mount_point_base images
gpfs_sparse_volumes images
hds_cinder_config_file storage
host common
host common
idle_timeout common
iet_conf common
image_conversion_dir images
instance_format images
instance_uuid_format images
iscsi_helper storage
iscsi_iotype storage
iscsi_ip_address storage
iscsi_num_targets storage
iscsi_port storage
iscsi_target_prefix storage
kombu_ssl_ca_certs rpc
kombu_ssl_certfile rpc
kombu_ssl_keyfile rpc
kombu_ssl_version rpc
lio_initiator_iqns common
lock_path common
log_config common
log_date_format common
log_dir common
log_file common
log_format common
logging_context_format_string common
logging_debug_format_suffix common
logging_default_format_string common
logging_exception_prefix common
lvm_mirrors storage
matchmaker_heartbeat_freq rpc
matchmaker_heartbeat_ttl rpc
matchmaker_ringfile rpc
max_age storage
max_gigabytes storage
max_overflow storage
max_pool_size storage
max_retries storage
memcached_servers storage
migration_create_volume_timeout_secs storage
min_pool_size storage
monkey_patch common
monkey_patch_modules common
my_ip common
netapp_login storage
netapp_password storage
netapp_server_hostname storage
netapp_server_port storage
netapp_size_multiplier storage
netapp_storage_family storage
netapp_storage_protocol storage
netapp_transport_type storage
netapp_vfiler storage
netapp_volume_list storage
netapp_vserver storage
nexenta_blocksize storage
nexenta_host storage
nexenta_iscsi_target_portal_port storage
nexenta_password storage
nexenta_rest_port storage
nexenta_rest_protocol storage
nexenta_sparse storage
nexenta_target_group_prefix storage
nexenta_target_prefix storage
nexenta_user storage
nexenta_volume storage
nfs_mount_options storage
nfs_mount_point_base storage
nfs_oversub_ratio storage
nfs_shares_config storage
nfs_sparsed_volumes storage
nfs_used_ratio storage
no_snapshot_gb_quota common
notification_driver rpc
notification_topics rpc
num_iscsi_scan_tries common
num_shell_tries common
osapi_max_limit api
osapi_max_request_body_size api
osapi_volume_base_URL api
osapi_volume_ext_list api
osapi_volume_extension api
password common
policy_default_rule common
policy_file common
pool_size common
port common
publish_errors rpc
pybasedir common
qpid_heartbeat rpc
qpid_hostname rpc
qpid_hosts rpc
qpid_password rpc
qpid_port rpc
qpid_protocol rpc
qpid_sasl_mechanisms rpc
qpid_tcp_nodelay rpc
qpid_username rpc
quota_driver common
quota_gigabytes common
quota_snapshots common
quota_volumes common
rabbit_durable_queues rpc
rabbit_ha_queues rpc
rabbit_host rpc
rabbit_hosts rpc
rabbit_max_retries rpc
rabbit_password rpc
rabbit_port rpc
rabbit_retry_backoff rpc
rabbit_retry_interval rpc
rabbit_use_ssl rpc
rabbit_userid rpc
rabbit_virtual_host rpc
rbd_ceph_conf storage
rbd_flatten_volume_from_snapshot storage
rbd_pool storage
rbd_secret_uuid storage
rbd_user storage
reservation_expire common
reserved_percentage common
retry_interval common
root_helper common
rootwrap_config common
rpc_backend rpc
rpc_cast_timeout rpc
rpc_conn_pool_size rpc
rpc_response_timeout rpc
rpc_thread_pool_size rpc
rpc_zmq_bind_address rpc
rpc_zmq_contexts rpc
rpc_zmq_host rpc
rpc_zmq_ipc_dir rpc
rpc_zmq_matchmaker rpc
rpc_zmq_port rpc
rpc_zmq_topic_backlog rpc
run_external_periodic_tasks common
san_clustername storage
san_ip storage
san_is_local storage
san_login storage
san_password storage
san_private_key storage
san_ssh_port storage
san_thin_provision storage
san_zfs_volume_base storage
scality_sofs_config storage
scality_sofs_mount_point storage
scality_sofs_volume_dir storage
scheduler_default_filters scheduler
scheduler_default_weighers scheduler
scheduler_driver scheduler
scheduler_host_manager scheduler
scheduler_json_config_location scheduler
scheduler_manager scheduler
scheduler_max_attempts scheduler
scheduler_topic scheduler
service_down_time common
sf_account_prefix storage
sf_allow_tenant_qos storage
sf_emulate_512 storage
snapshot_name_template backup
snapshot_same_host backup
sqlite_db common
sqlite_synchronous common
ssh_conn_timeout common
ssh_max_pool_conn common
ssh_min_pool_conn common
ssl_ca_file common
ssl_cert_file common
ssl_key_file common
state_path common
storage_availability_zone common
storwize_svc_connection_protocol storage
storwize_svc_flashcopy_timeout storage
storwize_svc_multihostmap_enabled storage
storwize_svc_multipath_enabled storage
storwize_svc_vol_autoexpand storage
storwize_svc_vol_compression storage
storwize_svc_vol_easytier storage
storwize_svc_vol_grainsize storage
storwize_svc_vol_rsize storage
storwize_svc_vol_warning storage
storwize_svc_volpool_name storage
syslog_log_facility common
tcp_keepidle common
topics common
transfer_api_class api
until_refresh common
use_default_quota_class common
use_forwarded_for common
use_multipath_for_image_xfer images
use_stderr common
use_syslog common
use_tpool common
verbose common
volume_api_class api
volume_backend_name storage
volume_clear storage
volume_clear_size storage
volume_dd_blocksize storage
volume_driver storage
volume_group storage
volume_manager storage
volume_name_template storage
volume_tmp_dir storage
volume_topic storage
volume_transfer_key_length storage
volume_transfer_salt_length storage
volume_usage_audit_period storage
volumes_dir storage
windows_iscsi_lun_path storage
xenapi_connection_password api
xenapi_connection_url api
xenapi_connection_username api
xenapi_nfs_server api
xenapi_nfs_serverpath api
xenapi_sr_base_path api
xiv_proxy storage
zadara_default_cache_policy storage
zadara_default_encryption storage
zadara_default_stripesize storage
zadara_default_striping_mode storage
zadara_password storage
zadara_user storage
zadara_vol_name_template storage
zadara_vpsa_allow_nonexistent_delete storage
zadara_vpsa_auto_detach_on_delete storage
zadara_vpsa_ip storage
zadara_vpsa_poolname storage
zadara_vpsa_port storage
zadara_vpsa_use_ssl storage

View File

@ -0,0 +1,131 @@
admin_password registry
admin_role api
admin_tenant_name registry
admin_user registry
allow_additional_image_properties common
allow_anonymous_access api
api_limit_max common
auth_region registry
auth_strategy registry
auth_url registry
backlog common
bind_host common
bind_port common
ca_file ssl
cert_file ssl
cleanup_scrubber imagecache
cleanup_scrubber_time imagecache
config_file paste
data_api common
db_auto_create api
debug logging
default_log_levels logging
default_store api
delayed_delete imagecache
enable_v1_api api
enable_v2_api api
fatal_deprecations logging
filesystem_store_datadir filesystem
flavor paste
image_cache_dir imagecache
image_cache_driver imagecache
image_cache_max_size imagecache
image_cache_sqlite_db imagecache
image_cache_stall_time imagecache
image_size_cap api
instance_format logging
instance_uuid_format logging
key_file ssl
known_stores api
limit_param_default common
log_config logging
log_date_format logging
log_dir logging
log_file logging
log_format logging
logfile_mode logging
logging_context_format_string logging
logging_debug_format_suffix logging
logging_default_format_string logging
logging_exception_prefix logging
metadata_encryption_key common
notifier_strategy common
owner_is_tenant api
policy_default_rule policy
policy_file policy
publish_errors logging
pydev_worker_debug_host testing
pydev_worker_debug_port testing
qpid_heartbeat qpid
qpid_hostname qpid
qpid_notification_exchange qpid
qpid_notification_topic qpid
qpid_password qpid
qpid_port qpid
qpid_protocol qpid
qpid_reconnect_interval qpid
qpid_reconnect_interval_max qpid
qpid_reconnect_interval_min qpid
qpid_reconnect_limit qpid
qpid_reconnect_timeout qpid
qpid_sasl_mechanisms qpid
qpid_tcp_nodelay qpid
qpid_username qpid
rabbit_durable_queues rabbitmq
rabbit_host rabbitmq
rabbit_max_retries rabbitmq
rabbit_notification_exchange rabbitmq
rabbit_notification_topic rabbitmq
rabbit_password rabbitmq
rabbit_port rabbitmq
rabbit_retry_backoff rabbitmq
rabbit_retry_max_backoff rabbitmq
rabbit_use_ssl rabbitmq
rabbit_userid rabbitmq
rabbit_virtual_host rabbitmq
rbd_store_ceph_conf rbd
rbd_store_chunk_size rbd
rbd_store_pool rbd
rbd_store_user rbd
registry_client_ca_file registry
registry_client_cert_file registry
registry_client_insecure registry
registry_client_key_file registry
registry_client_protocol registry
registry_client_timeout registry
registry_host registry
registry_port registry
s3_store_access_key s3
s3_store_bucket s3
s3_store_bucket_url_format s3
s3_store_create_bucket_on_put s3
s3_store_host s3
s3_store_object_buffer_dir s3
s3_store_secret_key s3
scrub_time imagecache
scrubber_datadir imagecache
show_image_direct_url common
sql_connection db
sql_idle_timeout db
sql_max_retries db
sql_retry_interval db
swift_enable_snet swift
swift_store_admin_tenants swift
swift_store_auth_address swift
swift_store_auth_version swift
swift_store_container swift
swift_store_create_container_on_put swift
swift_store_endpoint_type swift
swift_store_key swift
swift_store_large_object_chunk_size swift
swift_store_large_object_size swift
swift_store_multi_tenant swift
swift_store_region swift
swift_store_service_type swift
swift_store_user swift
syslog_log_facility logging
tcp_keepidle wsgi
use_stderr logging
use_syslog logging
verbose logging
workers common

View File

@ -0,0 +1,271 @@
add_meta_server_route bigswitch
address brocade
admin_password common
admin_tenant_name common
admin_user common
agent_down_time agent
allow_bulk api
allow_overlapping_ips policy
allow_pagination api
allow_sorting api
allowed_rpc_exception_modules common
api_extensions_path api
api_paste_config api
auth_region common
auth_strategy common
auth_strategy metadata
auth_url common
backdoor_port testing
backend db
backlog wsgi
base_mac common
bind_host common
bind_port common
bridge_mappings openvswitch
cert_file nec
concurrent_connections nicira
connection db
connection_debug db
connection_trace db
control_exchange rpc
core_plugin common
daemon_endpoint mlnx
debug logging
default_flavor meta
default_interface_name nicira
default_l2_gw_service_uuid nicira
default_l3_flavor meta
default_l3_gw_service_uuid nicira
default_log_levels logging
default_notification_level notifier
default_publisher_id notifier
default_quota quotas
default_transport_type nicira
default_tz_uuid nicira
dhcp_agent_notification common
dhcp_agents_per_network db
dhcp_lease_duration common
director_server plumgrid
director_server_port plumgrid
disable_process_locking common
driver nec
driver_fqn lbaas
enable_metadata_access_network nicira
enable_packet_filter nec
enable_tunneling openvswitch
extension_map meta
external_pids agent
fake_rabbit testing
fatal_deprecations logging
firewall_driver securitygroups
flat_networks ml2
force_gateway_on_subnet common
host cisco
host common
host nec
host rpc
http_timeout nicira
idle_timeout db
instance_format logging
instance_uuid_format logging
int_peer_patch_port openvswitch
integration_bridge openvswitch
key_file ssl
kombu_ssl_ca_certs kombu
kombu_ssl_certfile kombu
kombu_ssl_keyfile kombu
kombu_ssl_version kombu
l3_plugin_list meta
loadbalancer_pool_scheduler_driver lbaas
local_ip openvswitch
local_network_vswitch hyperv
lock_path common
log_config logging
log_date_format logging
log_dir logging
log_file logging
log_format logging
logging_context_format_string logging
logging_debug_format_suffix logging
logging_default_format_string logging
logging_exception_prefix logging
mac_generation_retries common
matchmaker_heartbeat_freq rpc
matchmaker_heartbeat_ttl rpc
max_dns_nameservers common
max_fixed_ips_per_port common
max_lp_per_bridged_ls nicira
max_lp_per_overlay_ls nicira
max_overflow db
max_pool_size db
max_retries db
max_router_rules bigswitch
max_routes quotas
max_subnet_host_routes common
mechanism_drivers ml2
metadata_mode nicira
metadata_proxy_socket metadata
metadata_router_id metadata
midonet_uri midonet
min_pool_size db
mode midonet
model_class cisco
network_auto_schedule scheduler
network_scheduler_driver scheduler
network_vlan_ranges common
network_vlan_ranges hyperv
network_vlan_ranges openvswitch
neutron_id bigswitch
nexus_driver cisco
nexus_plugin cisco
node_override_vif_802.1qbg bigswitch
node_override_vif_802.1qbh bigswitch
node_override_vif_binding_failed bigswitch
node_override_vif_bridge bigswitch
node_override_vif_hyperv bigswitch
node_override_vif_ivs bigswitch
node_override_vif_other bigswitch
node_override_vif_ovs bigswitch
node_override_vif_unbound bigswitch
notification_driver notifier
notification_topics notifier
nvp_cluster_uuid nicira
nvp_controllers nicira
nvp_gen_timeout nicira
nvp_password nicira
nvp_user nicira
openflow_rest_api ryu
ostype brocade
ovsdb_interface ryu
ovsdb_ip ryu
ovsdb_port ryu
pagination_max_limit api
password brocade
password midonet
password plumgrid
password rpc
periodic_fuzzy_delay common
periodic_interval common
physical_interface brocade
physical_interface_mappings linuxbridge
physical_network_vswitch_mappings hyperv
plugin_list meta
policy_file policy
polling_interval hyperv
pool_timeout db
port nec
port rpc
project_id midonet
provider_router_id midonet
provider_vlan_auto_create cisco
provider_vlan_auto_trunk cisco
provider_vlan_name_prefix cisco
publish_errors logging
qpid_heartbeat qpid
qpid_hostname qpid
qpid_hosts qpid
qpid_password qpid
qpid_port qpid
qpid_protocol qpid
qpid_sasl_mechanisms qpid
qpid_tcp_nodelay qpid
qpid_username qpid
quota_driver quotas
quota_firewall quotas
quota_firewall_policy quotas
quota_firewall_rule quotas
quota_floatingip quotas
quota_items quotas
quota_network quotas
quota_network_gateway quotas
quota_packet_filter quotas
quota_port quotas
quota_router quotas
quota_security_group quotas
quota_security_group_rule quotas
quota_subnet quotas
rabbit_durable_queues rabbitmq
rabbit_ha_queues rabbitmq
rabbit_host rabbitmq
rabbit_hosts rabbitmq
rabbit_max_retries rabbitmq
rabbit_password rabbitmq
rabbit_port rabbitmq
rabbit_retry_backoff rabbitmq
rabbit_retry_interval rabbitmq
rabbit_use_ssl rabbitmq
rabbit_userid rabbitmq
rabbit_virtual_host rabbitmq
redirects nicira
report_interval agent
req_timeout ml2
request_timeout mlnx
retries nicira
retry_interval db
retry_until_window wsgi
ringfile rpc
root_helper common
router_auto_schedule scheduler
router_scheduler_driver scheduler
rpc_backend rpc
rpc_cast_timeout rpc
rpc_conn_pool_size rpc
rpc_response_timeout rpc
rpc_support_old_agents rpc
rpc_thread_pool_size rpc
rpc_zmq_bind_address zeromq
rpc_zmq_contexts zeromq
rpc_zmq_host zeromq
rpc_zmq_ipc_dir zeromq
rpc_zmq_matchmaker zeromq
rpc_zmq_port zeromq
rpc_zmq_topic_backlog zeromq
run_external_periodic_tasks api
server_auth bigswitch
server_ssl bigswitch
server_timeout bigswitch
servers bigswitch
servertimeout plumgrid
service_plugins api
service_provider api
slave_connection db
sqlite_db db
sqlite_synchronous db
ssl_ca_file ssl
ssl_cert_file ssl
ssl_key_file ssl
state_path common
supported_extension_aliases meta
svi_round_robin cisco
sync_data bigswitch
syslog_log_facility logging
tcp_keepidle wsgi
tenant_default_router_rule bigswitch
tenant_network_type hyperv
tenant_network_type linuxbridge
tenant_network_type openvswitch
tenant_network_types ml2
topics rpc
tun_peer_patch_port openvswitch
tunnel_bridge openvswitch
tunnel_id_ranges openvswitch
tunnel_interface ryu
tunnel_ip ryu
tunnel_key_max ryu
tunnel_key_min ryu
tunnel_type openvswitch
tunnel_types openvswitch
type_drivers ml2
use_ssl nec
use_ssl ssl
use_stderr logging
use_syslog logging
use_tpool db
username brocade
username midonet
username plumgrid
verbose logging
vif_type bigswitch
vlan_name_prefix cisco
vnic_type mlnx
vswitch_plugin cisco

View File

@ -0,0 +1,607 @@
address zookeeper
agent_enabled spice
agent_resetnetwork_timeout xen
agent_timeout xen
agent_version_timeout xen
allow_instance_snapshots policy
allow_migrate_to_same_host policy
allow_resize_to_same_host policy
allow_same_net_traffic network
allowed_direct_url_schemes glance
allowed_rpc_exception_modules testing
api_paste_config wsgi
api_rate_limit authentication
attestation_api_url trustedcomputing
attestation_auth_blob trustedcomputing
attestation_auth_timeout trustedcomputing
attestation_port trustedcomputing
attestation_server trustedcomputing
attestation_server_ca_file trustedcomputing
auth_strategy authentication
auto_assign_floating_ip network
backdoor_port testing
bandwidth_poll_interval quota
bandwidth_update_interval quota
base_dir_name compute
baseapi rpc
bindir common
block_device_creation_timeout volumes
block_migration_flag hypervisor
boot_script_template vpn
buckets_path s3
ca_file ca
ca_path ca
cache_images xen
call_timeout cells
capabilities cells
cell_type cells
cells upgrade_levels
cells_config cells
cert upgrade_levels
cert_manager ca
cert_topic ca
checksum_base_images hypervisor
checksum_interval_seconds compute
cinder_api_insecure volumes
cinder_ca_certificates_file volumes
cinder_catalog_info volumes
cinder_cross_az_attach volumes
cinder_endpoint_template volumes
cinder_http_retries volumes
cnt_vpn_clients network
compute upgrade_levels
compute_api_class compute
compute_driver compute
compute_manager compute
compute_stats_class compute
compute_topic common
conductor upgrade_levels
config_drive_cdrom configdrive
config_drive_format configdrive
config_drive_inject_password configdrive
config_drive_skip_versions configdrive
config_drive_tempdir configdrive
console upgrade_levels
console_driver xen
console_host compute
console_manager compute
console_public_hostname console
console_token_ttl console
console_topic common
console_vmrc_error_retries xen
console_vmrc_port xen
console_xvp_conf xen
console_xvp_conf_template xen
console_xvp_log xen
console_xvp_multiplex_port xen
console_xvp_pid xen
consoleauth upgrade_levels
consoleauth_manager console
consoleauth_topic common
control_exchange rpc
cpu_allocation_ratio scheduling
create_unique_mac_address_attempts network
crl_file ca
db_backend baremetal
db_backend db
db_check_interval db
db_driver db
dbapi_use_tpool db
debug logging
default_access_ip_network_name network
default_availability_zone availabilityzones
default_ephemeral_format hypervisor
default_flavor compute
default_floating_pool network
default_log_levels logging
default_notification_level compute
default_os_type xen
default_publisher_id compute
default_schedule_zone availabilityzones
defer_iptables_apply network
deploy_kernel baremetal
deploy_ramdisk baremetal
dhcp_domain network
dhcp_lease_time network
dhcpbridge network
dhcpbridge_flagfile network
disable_process_locking common
disk_allocation_ratio scheduling
disk_cachemodes hypervisor
dmz_cidr vpn
dmz_mask vpn
dmz_net vpn
dns_server network
dns_update_periodic_interval network
dnsmasq_config_file network
driver baremetal
driver cells
ec2_dmz_host ec2
ec2_host ec2
ec2_listen ec2
ec2_listen_port ec2
ec2_path ec2
ec2_port ec2
ec2_private_dns_show_ip ec2
ec2_scheme ec2
ec2_strict_validation ec2
ec2_timestamp_expiry ec2
ec2_workers ec2
enable cells
enable_instance_password compute
enable_network_quota quota
enable_new_services api
enabled spice
enabled v3api
enabled_apis api
enabled_ssl_apis api
extensions_blacklist apiv3
extensions_whitelist apiv3
fake_call testing
fake_network testing
fake_rabbit testing
fatal_deprecations logging
fatal_exception_format_errors logging
firewall_driver network
fixed_ip_disassociate_timeout network
fixed_range network
fixed_range_v6 ipv6
flat_injected network
flat_interface network
flat_network_bridge network
flat_network_dns network
floating_ip_dns_manager network
force_config_drive configdrive
force_dhcp_release network
force_raw_images hypervisor
force_snat_range network
force_volumeutils_v1 volumes
forward_bridge_interface network
fping_path fping
gateway network
gateway_v6 ipv6
glance_api_insecure glance
glance_api_servers glance
glance_host glance
glance_num_retries glance
glance_port glance
glance_protocol glance
glusterfs_mount_point_base volumes
heal_instance_info_cache_interval compute
host common
host redis
host_state_interval compute
html5proxy_base_url spice
image_cache_manager_interval compute
image_decryption_dir s3
image_info_filename_pattern compute
inject_password hypervisor
injected_network_template network
injected_network_template network
instance_build_timeout compute
instance_dns_domain network
instance_dns_manager network
instance_format logging
instance_name_template api
instance_type_extra_specs baremetal
instance_update_num_instances cells
instance_updated_at_threshold cells
instance_usage_audit compute
instance_usage_audit_period compute
instance_uuid_format logging
instances_path compute
instances_path_share hyperv
integration_bridge vmware
intercell upgrade_levels
internal_service_availability_zone availabilityzones
ipmi_power_retry baremetal
iptables_bottom_regex network
iptables_drop_action network
iptables_top_regex network
ipv6_backend ipv6
iqn_prefix xen
iscsi_iqn_prefix volumes
isolated_hosts scheduling
isolated_images scheduling
key_file ca
keymap spice
keymgr_api_class keymgr
keys_path ca
keystone_ec2_url ec2
kombu_ssl_ca_certs kombu
kombu_ssl_certfile kombu
kombu_ssl_keyfile kombu
kombu_ssl_version kombu
l3_lib network
ldap_dns_base_dn ldap
ldap_dns_password ldap
ldap_dns_servers ldap
ldap_dns_soa_expiry ldap
ldap_dns_soa_hostmaster ldap
ldap_dns_soa_minimum ldap
ldap_dns_soa_refresh ldap
ldap_dns_soa_retry ldap
ldap_dns_url ldap
ldap_dns_user ldap
libvirt_cpu_mode hypervisor
libvirt_cpu_model hypervisor
libvirt_disk_prefix hypervisor
libvirt_images_type hypervisor
libvirt_images_volume_group hypervisor
libvirt_inject_key hypervisor
libvirt_inject_partition hypervisor
libvirt_inject_password hypervisor
libvirt_iscsi_use_multipath hypervisor
libvirt_lvm_snapshot_size hypervisor
libvirt_nonblocking hypervisor
libvirt_ovs_bridge hypervisor
libvirt_snapshot_compression hypervisor
libvirt_snapshots_directory hypervisor
libvirt_sparse_logical_volumes hypervisor
libvirt_type hypervisor
libvirt_uri hypervisor
libvirt_use_virtio_for_bridges hypervisor
libvirt_vif_driver hypervisor
libvirt_volume_drivers hypervisor
libvirt_wait_soft_reboot_seconds hypervisor
limit_cpu_features hyperv
linuxnet_interface_driver network
linuxnet_ovs_integration_bridge network
live_migration_bandwidth livemigration
live_migration_flag livemigration
live_migration_retry_count livemigration
live_migration_uri livemigration
lock_path common
lockout_attempts ec2
lockout_minutes ec2
lockout_window ec2
log_config logging
log_date_format logging
log_dir logging
log_file logging
log_format logging
logging_context_format_string logging
logging_debug_format_suffix logging
logging_default_format_string logging
logging_exception_prefix logging
manager cells
manager conductor
matchmaker_heartbeat_freq rpc
matchmaker_heartbeat_ttl rpc
max_age policy
max_hop_count cells
max_instances_per_host scheduling
max_io_ops_per_host scheduling
max_kernel_ramdisk_size xen
memcached_servers common
metadata_host metadata
metadata_listen metadata
metadata_listen_port metadata
metadata_manager metadata
metadata_port metadata
metadata_workers metadata
mkisofs_cmd configdrive
monkey_patch testing
monkey_patch_modules testing
multi_host network
multi_instance_display_name_template api
mute_child_interval cells
mute_weight_multiplier cells
mute_weight_value cells
my_ip common
name cells
net_config_template baremetal
network upgrade_levels
network_allocate_retries network
network_api_class network
network_device_mtu network
network_driver network
network_manager network
network_size network
network_topic network
networks_path network
neutron_admin_auth_url neutron
neutron_admin_password neutron
neutron_admin_tenant_name neutron
neutron_admin_username neutron
neutron_api_insecure neutron
neutron_auth_strategy neutron
neutron_default_tenant_id neutron
neutron_extension_sync_interval neutron
neutron_metadata_proxy_shared_secret neutron
neutron_ovs_bridge neutron
neutron_region_name neutron
neutron_url neutron
neutron_url_timeout neutron
nfs_mount_options volumes
nfs_mount_point_base volumes
non_inheritable_image_properties api
notification_driver common
notification_topics common
notify_api_faults common
notify_on_state_change common
novncproxy_base_url vnc
null_kernel api
num_aoe_discover_tries volumes
num_iscsi_scan_tries volumes
num_networks network
os_region_name volumes
osapi_compute_ext_list api
osapi_compute_extension api
osapi_compute_link_prefix api
osapi_compute_listen api
osapi_compute_listen_port api
osapi_compute_unique_server_name_scope policy
osapi_compute_workers api
osapi_glance_link_prefix glance
osapi_hide_server_address_states api
osapi_max_limit policy
osapi_max_request_body_size policy
password redis
password_length policy
periodic_enable periodic
periodic_fuzzy_delay periodic
policy_default_rule policy
policy_file policy
port redis
power_manager baremetal
powervm_img_local_path powervm
powervm_img_remote_path powervm
powervm_mgr powervm
powervm_mgr_passwd powervm
powervm_mgr_type powervm
powervm_mgr_user powervm
preallocate_images hypervisor
project_cert_subject ca
public_interface network
publish_errors logging
pxe_append_params baremetal
pxe_config_template baremetal
pxe_deploy_timeout baremetal
pxe_network_config baremetal
pybasedir common
qemu_img_cmd hyperv
qpid_heartbeat qpid
qpid_hostname qpid
qpid_hosts qpid
qpid_password qpid
qpid_port qpid
qpid_protocol qpid
qpid_sasl_mechanisms qpid
qpid_tcp_nodelay qpid
qpid_username qpid
quota_cores quota
quota_driver quota
quota_fixed_ips quota
quota_floating_ips quota
quota_injected_file_content_bytes quota
quota_injected_file_path_bytes quota
quota_injected_files quota
quota_instances quota
quota_key_pairs quota
quota_metadata_items quota
quota_ram quota
quota_security_group_rules quota
quota_security_groups quota
rabbit_durable_queues rabbitmq
rabbit_ha_queues rabbitmq
rabbit_host rabbitmq
rabbit_hosts rabbitmq
rabbit_max_retries rabbitmq
rabbit_password rabbitmq
rabbit_port rabbitmq
rabbit_retry_backoff rabbitmq
rabbit_retry_interval rabbitmq
rabbit_use_ssl rabbitmq
rabbit_userid rabbitmq
rabbit_virtual_host rabbitmq
ram_allocation_ratio scheduling
ram_weight_multiplier scheduling
rbd_secret_uuid volume
rbd_user volume
reboot_timeout compute
reclaim_instance_interval compute
recv_timeout zookeeper
region_list ec2
remove_unused_base_images hypervisor
remove_unused_kernels hypervisor
remove_unused_original_minimum_age_seconds hypervisor
remove_unused_resized_minimum_age_seconds hypervisor
report_interval common
rescue_image_id hypervisor
rescue_kernel_id hypervisor
rescue_ramdisk_id hypervisor
rescue_timeout hypervisor
reservation_expire policy
reserve_percent cells
reserved_host_disk_mb scheduling
reserved_host_memory_mb scheduling
resize_confirm_window compute
resume_guests_state_on_host_boot compute
ringfile rpc
rootwrap_config common
routing_source_ip network
rpc_backend rpc
rpc_cast_timeout rpc
rpc_conn_pool_size rpc
rpc_driver_queue_base rpc
rpc_response_timeout rpc
rpc_thread_pool_size rpc
rpc_zmq_bind_address zeromq
rpc_zmq_contexts zeromq
rpc_zmq_host zeromq
rpc_zmq_ipc_dir zeromq
rpc_zmq_matchmaker zeromq
rpc_zmq_port zeromq
rpc_zmq_topic_backlog zeromq
run_external_periodic_tasks periodic
running_deleted_instance_action compute
running_deleted_instance_poll_interval compute
running_deleted_instance_timeout compute
s3_access_key s3
s3_affix_tenant s3
s3_host s3
s3_listen s3
s3_listen_port s3
s3_port s3
s3_secret_key s3
s3_use_ssl s3
scality_sofs_config volume
scality_sofs_mount_point volume
scheduler upgrade_levels
scheduler_available_filters scheduling
scheduler_default_filters scheduling
scheduler_driver scheduling
scheduler_filter_classes scheduling
scheduler_host_manager scheduling
scheduler_host_subset_size scheduling
scheduler_json_config_location scheduling
scheduler_manager scheduling
scheduler_max_attempts scheduling
scheduler_retries scheduling
scheduler_retry_delay scheduling
scheduler_topic scheduling
scheduler_weight_classes scheduling
security_group_api network
send_arp_for_ha network
send_arp_for_ha_count network
server_listen spice
server_proxyclient_address spice
service_down_time common
service_neutron_metadata_proxy neutron
servicegroup_driver api
sg_prefix zookeeper
sg_retry_interval zookeeper
share_dhcp_address network
shelved_offload_time compute
shelved_poll_interval compute
snapshot_image_format hypervisor
snapshot_name_template api
sql_connection baremetal
sql_connection db
sql_connection_debug db
sql_connection_trace db
sql_idle_timeout db
sql_max_overflow db
sql_max_pool_size db
sql_max_retries db
sql_min_pool_size db
sql_retry_interval db
sqlite_db db
sqlite_synchronous db
sr_matching_filter xen
ssl_ca_file wsgi
ssl_cert_file wsgi
ssl_key_file wsgi
state_path common
stub_compute xen
sync_power_state_interval compute
syslog_log_facility logging
target_host xen
target_port xen
tcp_keepidle wsgi
teardown_unused_network_gateway network
tempdir common
terminal baremetal
terminal_cert_dir baremetal
terminal_pid_dir baremetal
tftp_root baremetal
tile_pdu_ip tilera
tile_pdu_mgr tilera
tile_pdu_off tilera
tile_pdu_on tilera
tile_pdu_status tilera
tile_power_wait tilera
timeout_nbd hypervisor
topic cells
topic conductor
topics rpc
until_refresh policy
update_dns_entries network
use_cow_images hypervisor
use_forwarded_for api
use_ipv6 ipv6
use_join_force xen
use_linked_clone vmware
use_local conductor
use_network_dns_servers network
use_neutron_default_nets network
use_project_ca ca
use_single_default_gateway network
use_stderr logging
use_syslog logging
use_unsafe_iscsi baremetal
use_usb_tablet hypervisor
user_cert_subject ca
vcpu_pin_set hypervisor
verbose logging
vif_driver baremetal
virt_mkfs hypervisor
virtual_power_host_key baremetal
virtual_power_host_pass baremetal
virtual_power_host_user baremetal
virtual_power_ssh_host baremetal
virtual_power_ssh_port baremetal
virtual_power_type baremetal
vlan_interface network
vlan_start network
vmwareapi_api_retry_count vmware
vmwareapi_cluster_name vmware
vmwareapi_host_ip vmware
vmwareapi_host_password vmware
vmwareapi_host_username vmware
vmwareapi_task_poll_interval vmware
vmwareapi_vlan_interface vmware
vmwareapi_wsdl_loc vmware
vnc_enabled vnc
vnc_keymap vnc
vnc_password vnc
vnc_port vnc
vnc_port_total vnc
vncserver_listen vnc
vncserver_proxyclient_address vnc
volume_api_class volumes
volume_attach_retry_count volumes
volume_attach_retry_interval volumes
volume_driver volumes
volume_usage_poll_interval volumes
vpn_flavor vpn
vpn_image_id vpn
vpn_ip vpn
vpn_key_suffix vpn
vpn_start vpn
vswitch_name hyperv
wsgi_log_format wsgi
xen_hvmloader_path xen
xenapi_agent_path xen
xenapi_check_host xen
xenapi_connection_concurrent xen
xenapi_connection_password xen
xenapi_connection_url xen
xenapi_connection_username xen
xenapi_disable_agent xen
xenapi_image_upload_handler xen
xenapi_login_timeout xen
xenapi_num_vbd_unplug_retries xen
xenapi_ovs_integration_bridge xen
xenapi_remap_vbd_dev xen
xenapi_remap_vbd_dev_prefix xen
xenapi_running_timeout xen
xenapi_sparse_copy xen
xenapi_sr_base_path xen
xenapi_torrent_base_url xen
xenapi_torrent_download_stall_cutoff xen
xenapi_torrent_images xen
xenapi_torrent_listen_port_end xen
xenapi_torrent_listen_port_start xen
xenapi_torrent_max_last_accessed xen
xenapi_torrent_max_seeder_processes_per_host xen
xenapi_torrent_seed_chance xen
xenapi_torrent_seed_duration xen
xenapi_use_agent_default xen
xenapi_vhd_coalesce_max_attempts xen
xenapi_vhd_coalesce_poll_interval xen
xenapi_vif_driver xen
xvpvncproxy_base_url xvpnvncproxy
xvpvncproxy_host xvpnvncproxy
xvpvncproxy_port xvpnvncproxy

Binary file not shown.

View File

@ -0,0 +1,3 @@
These scripts are to be placed in (and run from) a directory *above* the source repos; each generated <project>.conf.sample file is written there, next to the corresponding checkout.
To test against a subset of projects, edit the proj_list line near the top of genconfs.sh.
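A minimal worked example of the intended layout and invocation, assuming the checkouts and the copied script share one directory (the directory names are illustrative; note that genconfs.sh resolves the generator as $(dirname "$0")/../generator.py, so generator.py is expected one level above wherever genconfs.sh lives):

# hypothetical layout: the current directory holds the project checkouts
$ ls
ceilometer  cinder  genconfs.sh  glance  keystone  neutron  nova
$ bash genconfs.sh
# each project's sample configuration is written next to its checkout
$ ls *.conf.sample
ceilometer.conf.sample  cinder.conf.sample  glance.conf.sample  keystone.conf.sample  neutron.conf.sample  nova.conf.sample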

View File

@ -0,0 +1,31 @@
#!/usr/bin/env bash
# vim: tabstop=4 shiftwidth=4 softtabstop=4

proj_list="ceilometer cinder glance keystone nova neutron"
#proj_list="keystone"

for proj in ${proj_list}; do
    cd ${proj}
    # -o ! -path "build/*" \
    FILES=$(find ${proj} -type f -name "*.py" ! -path "${proj}/tests/*" \
        ! -path "build/*" \
        -exec grep -l "Opt(" {} \; | sort -u)
    BINS=$(echo bin/${proj}-* | grep -v ${proj}-rootwrap)

    export EVENTLET_NO_GREENDNS=yes
    PYTHONPATH=./:${PYTHONPATH} \
        python $(dirname "$0")/../generator.py ${FILES} ${BINS} > \
        ../${proj}.conf.sample

    # Remove compiled files created by imp.load_source()
    for bin in ${BINS}; do
        [ -f ${bin}c ] && rm ${bin}c
    done
    cd -
done

View File

@ -0,0 +1,262 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 SINA Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Zhongyue Luo, SINA Corporation.
#
# ====================
# Leaving original copyright/licensing info for now... though I made
# a couple small changes...
# --Steven Deaton (Jun. 11, 2013)
# ====================
"""Extracts OpenStack config option info from module(s)."""
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
from openstack.common import gettextutils
from openstack.common import importutils
# sld
# ...not sure about these being needed, so they are commented for now.
#gettextutils.install('nova')
#gettextutils.install('ceilometer')
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
STROPT: 'string value',
BOOLOPT: 'boolean value',
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
MULTISTROPT: 'multi valued',
}
OPTION_COUNT = 0
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT,
MULTISTROPT]))
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
WORDWRAP_WIDTH = 60


def generate(srcfiles):
    mods_by_pkg = dict()
    for filepath in srcfiles:
        pkg_name = filepath.split(os.sep)[1]
        mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
                            os.path.basename(filepath).split('.')[0]])
        mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
    # NOTE(lzyeval): place top level modules before packages
    pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys())
    pkg_names.sort()
    ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
    ext_names.sort()
    pkg_names.extend(ext_names)

    # opts_by_group is a mapping of group name to an options list
    # The options list is a list of (module, options) tuples
    opts_by_group = {'DEFAULT': []}

    for pkg_name in pkg_names:
        mods = mods_by_pkg.get(pkg_name)
        mods.sort()
        for mod_str in mods:
            if mod_str.endswith('.__init__'):
                mod_str = mod_str[:mod_str.rfind(".")]

            mod_obj = _import_module(mod_str)
            if not mod_obj:
                continue

            for group, opts in _list_opts(mod_obj):
                opts_by_group.setdefault(group, []).append((mod_str, opts))

    print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
    for group, opts in opts_by_group.items():
        print_group_opts(group, opts)

    print "# Total option count: %d" % OPTION_COUNT


def _import_module(mod_str):
    try:
        if mod_str.startswith('bin.'):
            imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
            return sys.modules[mod_str[4:]]
        else:
            return importutils.import_module(mod_str)
    except ImportError as ie:
        sys.stderr.write("%s\n" % str(ie))
        return None
    except Exception:
        return None


def _is_in_group(opt, group):
    "Check if opt is in group."
    for key, value in group._opts.items():
        if value['opt'] == opt:
            return True
    return False


def _guess_groups(opt, mod_obj):
    # is it in the DEFAULT group?
    if _is_in_group(opt, cfg.CONF):
        return 'DEFAULT'

    # what other groups is it in?
    for key, value in cfg.CONF.items():
        if isinstance(value, cfg.CONF.GroupAttr):
            if _is_in_group(opt, value._group):
                return value._group.name

    raise RuntimeError(
        "Unable to find group for option %s, "
        "maybe it's defined twice in the same group?"
        % opt.name
    )


def _list_opts(obj):
    def is_opt(o):
        return (isinstance(o, cfg.Opt) and
                not isinstance(o, cfg.SubCommandOpt))

    opts = list()
    for attr_str in dir(obj):
        attr_obj = getattr(obj, attr_str)
        if is_opt(attr_obj):
            opts.append(attr_obj)
        elif (isinstance(attr_obj, list) and
              all(map(lambda x: is_opt(x), attr_obj))):
            opts.extend(attr_obj)

    ret = {}
    for opt in opts:
        ret.setdefault(_guess_groups(opt, obj), []).append(opt)
    return ret.items()


def print_group_opts(group, opts_by_module):
    print "[%s]" % group
    print

    global OPTION_COUNT
    for mod, opts in opts_by_module:
        OPTION_COUNT += len(opts)
        print '#'
        print '# Options defined in %s' % mod
        print '#'
        print
        for opt in opts:
            _print_opt(opt)
        print


def _get_my_ip():
    try:
        csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        csock.connect(('8.8.8.8', 80))
        (addr, port) = csock.getsockname()
        csock.close()
        return addr
    except socket.error:
        return None


def _sanitize_default(s):
    """Set up a reasonably sensible default for pybasedir, my_ip and host."""
    if s.startswith(BASEDIR):
        return s.replace(BASEDIR, '/usr/lib/python/site-packages')
    elif BASEDIR in s:
        return s.replace(BASEDIR, '')
    elif s == _get_my_ip():
        return '10.0.0.1'
    elif s == socket.getfqdn():
        return 'localhost'
    elif s.strip() != s:
        return '"%s"' % s
    return s


def _print_opt(opt):
    opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
    if not opt_help:
        sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
        # Fall back to an empty string so the type suffix below does not
        # raise a TypeError when the option has no help text.
        opt_help = ""
    opt_type = None
    try:
        opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
    except (ValueError, AttributeError) as err:
        sys.stderr.write("%s\n" % str(err))
        sys.exit(1)
    opt_help += ' (' + OPT_TYPES[opt_type] + ')'
    print '#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH))
    try:
        if opt_default is None:
            print '#%s=<None>' % opt_name
        elif opt_type == STROPT:
            assert(isinstance(opt_default, basestring))
            print '#%s=%s' % (opt_name, _sanitize_default(opt_default))
        elif opt_type == BOOLOPT:
            assert(isinstance(opt_default, bool))
            print '#%s=%s' % (opt_name, str(opt_default).lower())
        elif opt_type == INTOPT:
            assert(isinstance(opt_default, int) and
                   not isinstance(opt_default, bool))
            print '#%s=%s' % (opt_name, opt_default)
        elif opt_type == FLOATOPT:
            assert(isinstance(opt_default, float))
            print '#%s=%s' % (opt_name, opt_default)
        elif opt_type == LISTOPT:
            assert(isinstance(opt_default, list))
            print '#%s=%s' % (opt_name, ','.join(opt_default))
        elif opt_type == MULTISTROPT:
            assert(isinstance(opt_default, list))
            if not opt_default:
                opt_default = ['']
            for default in opt_default:
                print '#%s=%s' % (opt_name, default)
        print
    except Exception:
        sys.stderr.write('Error in option "%s"\n' % opt_name)
        sys.exit(1)


def main():
    if len(sys.argv) < 2:
        print "usage: %s [srcfile]...\n" % sys.argv[0]
        sys.exit(0)
    generate(sys.argv[1:])


if __name__ == '__main__':
    main()
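
For reference, generator.py can also be invoked by hand for a single project; the sketch below mirrors the per-project loop in genconfs.sh (the project name and the path to generator.py are placeholders, not paths mandated by this patch):

# run from the top of a project checkout, mirroring genconfs.sh
$ cd cinder
$ export EVENTLET_NO_GREENDNS=yes
$ FILES=$(find cinder -type f -name "*.py" ! -path "cinder/tests/*" \
    -exec grep -l "Opt(" {} \; | sort -u)
$ PYTHONPATH=./:${PYTHONPATH} python /path/to/generator.py ${FILES} > ../cinder.conf.sample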