{%- from "cinder/map.jinja" import volume with context %}
|
|
|
|
[DEFAULT]
|
|
rootwrap_config = /etc/cinder/rootwrap.conf
|
|
api_paste_confg = /etc/cinder/api-paste.ini
|
|
|
|
iscsi_helper = tgtadm
|
|
volume_name_template = volume-%s
|
|
#volume_group = cinder
|
|
|
|
verbose = True
|
|
|
|
osapi_volume_workers = {{ volume.get('volume_workers', '4') }}
|
|
|
|
auth_strategy = keystone
|
|
|
|
state_path = /var/lib/cinder
|
|
lock_path = /var/lock/cinder
|
|
|
|
use_syslog=False
|
|
|
|
glance_num_retries=0
|
|
debug=False
|
|
|
|
os_region_name={{ volume.identity.region }}
|
|
|
|
#glance_api_ssl_compression=False
|
|
#glance_api_insecure=False
|
|
|
|
osapi_volume_listen={{ volume.osapi.host }}
|
|
|
|
glance_host={{ volume.glance.host }}
|
|
glance_port={{ volume.glance.port }}
|
|
glance_api_version=2
|
|
|
|
volume_backend_name=DEFAULT
|
|
|
|
{%- if volume.backend is defined %}
|
|
|
|
default_volume_type={{ volume.default_volume_type }}
|
|
|
|
enabled_backends={% for backend_name, backend in volume.get('backend', {}).iteritems() %}{{ backend_name }}{% if not loop.last %},{% endif %}{% endfor %}
|
|
|
|
{%- else %}
|
|
|
|
default_volume_type={% for type in volume.get('types', []) %}{% if loop.first %}{{ type.name }}{% endif %}{% endfor %}
|
|
|
|
enabled_backends={% for type in volume.get('types', []) %}{{ type.backend }}{% if not loop.last %},{% endif %}{% endfor %}{% if volume.storage.engine == "openvstorage" %}{% for vpool in pillar.openvstorage.server.get('vpools', []) %}{{ vpool }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
|
|
|
|
{%- endif %}
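{#
  Illustrative pillar for the branch above (a sketch: the "new way" keys a
  backend map, the "old way" a types list; names and values below are
  placeholders, and the nesting under cinder:volume follows the map.jinja
  import at the top of this file):

  cinder:
    volume:
      default_volume_type: 7k2_SAS
      backend:
        7k2_SAS:
          engine: storwize
      # or, the old way:
      types:
      - name: 7k2_SAS
        backend: 7k2_SAS
#}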
# RPC response timeout recommended by Hitachi
rpc_response_timeout=3600

# Rabbit
rpc_backend=cinder.openstack.common.rpc.impl_kombu
control_exchange=cinder


volume_clear={{ volume.wipe_method }}

{%- if volume.notification is mapping %}
notification_driver = {{ volume.notification.get('driver', 'messagingv2') }}
{%- if volume.notification.topics is defined %}
notification_topics = {{ volume.notification.topics }}
{%- endif %}
{%- elif volume.notification %}
notification_driver = messagingv2
{%- endif %}
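{#
  Illustrative pillar for the notification branch above (a sketch; key
  names follow the lookups above, values are placeholders):

  cinder:
    volume:
      notification:
        driver: messagingv2
        topics: notifications
#}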
#volume_group = vg_cinder_volume

volumes_dir = /var/lib/cinder/volumes
log_dir=/var/log/cinder

# Use syslog for logging. (boolean value)
#use_syslog=false

[oslo_concurrency]

lock_path=/var/lock/cinder
[oslo_messaging_rabbit]
{%- if volume.message_queue.members is defined %}
rabbit_hosts = {% for member in volume.message_queue.members -%}
                   {{ member.host }}:{{ member.get('port', 5672) }}
                   {%- if not loop.last -%},{%- endif -%}
               {%- endfor -%}
{%- else %}
rabbit_host = {{ volume.message_queue.host }}
rabbit_port = {{ volume.message_queue.port }}
{%- endif %}

rabbit_userid = {{ volume.message_queue.user }}
rabbit_password = {{ volume.message_queue.password }}
rabbit_virtual_host = {{ volume.message_queue.virtual_host }}
kombu_reconnect_delay=5.0
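{#
  Illustrative pillar for the message queue block above (a sketch; give
  either a single host/port or a members list, matching the branches
  above — values are placeholders):

  cinder:
    volume:
      message_queue:
        user: openstack
        password: secret
        virtual_host: '/openstack'
        members:
        - host: 10.0.0.11
        - host: 10.0.0.12
          port: 5673
#}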
{%- if volume.identity.get('version', 2) == 2 %}

[keystone_authtoken]
signing_dir=/tmp/keystone-signing-cinder
admin_password={{ volume.identity.password }}
auth_port={{ volume.identity.port }}
auth_host={{ volume.identity.host }}
admin_tenant_name={{ volume.identity.tenant }}
auth_protocol=http
admin_user=cinder
signing_dirname=/tmp/keystone-signing-cinder
auth_uri=http://{{ volume.identity.host }}:{{ volume.identity.port }}

{%- else %}

[keystone_authtoken]
signing_dir=/tmp/keystone-signing-cinder
signing_dirname=/tmp/keystone-signing-cinder
identity_uri = http://{{ volume.identity.host }}:35357/v3
revocation_cache_time = 10
auth_section = generic_password

[generic_password]
auth_plugin = password
user_domain_id = {{ volume.identity.get('domain', 'default') }}
project_domain_id = {{ volume.identity.get('domain', 'default') }}
project_name = {{ volume.identity.tenant }}
username = {{ volume.identity.user }}
password = {{ volume.identity.password }}
auth_uri=http://{{ volume.identity.host }}:5000/v3
auth_url=http://{{ volume.identity.host }}:35357/v3

{%- endif %}
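{#
  Illustrative pillar for the identity branches above (a sketch; version 2
  renders the admin_* block, any other version the generic_password
  block — values are placeholders):

  cinder:
    volume:
      identity:
        version: 3
        region: RegionOne
        host: 10.0.0.10
        port: 35357
        tenant: service
        user: cinder
        password: secret
        domain: default
#}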
[database]
idle_timeout=3600
max_pool_size=30
max_retries=-1
max_overflow=40
connection = {{ volume.database.engine }}://{{ volume.database.user }}:{{ volume.database.password }}@{{ volume.database.host }}/{{ volume.database.name }}
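{#
  For example, with an illustrative pillar of engine: mysql, user: cinder,
  password: secret, host: 10.0.0.10, name: cinder (all placeholders), the
  line above renders as:

  connection = mysql://cinder:secret@10.0.0.10/cinder
#}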
{# new way #}

{%- if volume.backend is defined %}

{%- for backend_name, backend in volume.get('backend', {}).items() %}

{%- set backend_fragment = "cinder/files/backend/_" + backend.engine + ".conf" %}
{%- include backend_fragment %}

{%- endfor %}
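{#
  The include above resolves one template fragment per backend engine; for
  example, a backend with engine "ceph" would pull in
  cinder/files/backend/_ceph.conf (the path pattern comes from the set
  statement above; which fragments actually exist depends on the formula).
#}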
{%- else %}

{# old way #}
{%- if volume.storage.engine == "storwize" %}
|
|
|
|
{%- for type in volume.get('types', []) %}
|
|
|
|
[{{ type.backend }}]
|
|
volume_driver = cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver
|
|
volume_backend_name={{ type.backend }}
|
|
san_ip={{ volume.storage.host }}
|
|
san_ssh_port={{ volume.storage.port }}
|
|
san_login={{ volume.storage.user }}
|
|
san_password={{ volume.storage.password }}
|
|
|
|
storwize_svc_volpool_name={{ type.pool }}
|
|
#storwize_svc_connection_protocol=iSCSI
|
|
storwize_svc_connection_protocol={{ volume.storage.connection }}
|
|
#storwize_svc_iscsi_chap_enabled=true
|
|
storwize_svc_multihost_enabled={{ volume.storage.multihost }}
|
|
storwize_svc_multipath_enabled={{ volume.storage.multipath }}
|
|
|
|
{%- endfor %}
|
|
|
|
{%- endif %}
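{#
  Illustrative pillar for the storwize branch above (a sketch; key names
  follow the lookups above, values are placeholders):

  cinder:
    volume:
      storage:
        engine: storwize
        host: 192.168.0.20
        port: 22
        user: admin
        password: secret
        connection: FC
        multihost: true
        multipath: true
      types:
      - name: 7k2_SAS
        backend: 7k2_SAS
        pool: SAS7K2
#}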
{%- if volume.storage.engine == "hitachi_vsp" %}

{%- for type in volume.get('types', []) %}

[{{ type.backend }}]

{%- if volume.storage.version == "1.0" %}

volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver
volume_backend_name={{ type.backend }}

#
# Options defined in cinder.volume.drivers.hitachi.hbsd_common
#

# Serial number of storage system (string value)
#hitachi_serial_number=<None>
hitachi_serial_number=86644
#hitachi_serial_number=355316

# Name of an array unit (string value)
#hitachi_unit_name=<None>
#hitachi_unit_name=fiat

# Pool ID of storage system (integer value)
#hitachi_pool_id=<None>
hitachi_pool_id=4

# Thin pool ID of storage system (integer value)
hitachi_thin_pool_id=13

# Default copy method of storage system (string value)
#hitachi_default_copy_method=FULL
hitachi_default_copy_method=THIN

# Control port names for HostGroup or iSCSI Target (string value)
#hitachi_target_ports=<None>
hitachi_target_ports=CL1-E,CL2-E,CL3-B,CL4-D

# Request for creating HostGroup or iSCSI Target (boolean value)
hitachi_group_request=True
#hitachi_group_request=false

#
# Options defined in cinder.volume.drivers.hitachi.hbsd_fc
#

# Request for FC Zone creating HostGroup (boolean value)
hitachi_zoning_request=true

#
# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm
#

# Instance numbers for HORCM (string value)
#hitachi_horcm_numbers=200,201
hitachi_horcm_numbers=0,1

# Username of storage system for HORCM (string value)
#hitachi_horcm_user=<None>
#hitachi_horcm_user=openstack
hitachi_horcm_user=root

# Password of storage system for HORCM (string value)
#hitachi_horcm_password=<None>
#hitachi_horcm_password=avg2014
hitachi_horcm_password=X3tT35va

# Add to HORCM configuration (boolean value)
#hitachi_horcm_add_conf=true
hitachi_horcm_add_conf=true

# Hitachi multipath advice
use_multipath_for_image_xfer=false

{%- endif %}
{%- if volume.storage.version == "1.3" %}

volume_driver = cinder.volume.drivers.hitachi.hbsd.hbsd_fc.HBSDFCDriver
volume_backend_name=hitachi_vsp

#
# Options defined in cinder.volume.drivers.hitachi.hbsd_common
#

# Serial number of storage system (string value)
#hitachi_serial_number=<None>
#hitachi_serial_number=86644
hitachi_storage_id=86644

# Pool ID of storage system (integer value)
#hitachi_pool_id=<None>
hitachi_pool=4

# Thin pool ID of storage system (integer value)
hitachi_thin_pool=13

# Default copy method of storage system (string value)
#hitachi_default_copy_method=FULL
hitachi_default_copy_method=THIN

# Control port names for HostGroup or iSCSI Target (string value)
#hitachi_target_ports=<None>
hitachi_target_ports=CL3-B

hitachi_compute_target_ports=CL1-E,CL2-E,CL3-B,CL4-D

# Range of group number (string value)
#hitachi_group_range=

# Request for creating HostGroup or iSCSI Target (boolean value)
hitachi_group_request=True
#hitachi_group_request=false

# Instance numbers for HORCM (string value)
#hitachi_horcm_numbers=200,201
hitachi_horcm_numbers=0,1

# Username of storage system for HORCM (string value)
#hitachi_horcm_user=<None>
#hitachi_horcm_user=openstack
hitachi_horcm_user=root

# Password of storage system for HORCM (string value)
#hitachi_horcm_password=<None>
#hitachi_horcm_password=avg2014
hitachi_horcm_password=X3tT35va

# Add to HORCM configuration (boolean value)
#hitachi_horcm_add_conf=true
hitachi_horcm_add_conf=true

# Hitachi multipath advice
use_multipath_for_image_xfer=false

hitachi_storage_cli=HORCM

{%- endif %}

{%- endfor %}

{%- endif %}
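{#
  Illustrative pillar for the hitachi_vsp branch above (a sketch; only the
  keys this template reads are shown, values are placeholders — the driver
  options themselves are hardcoded above):

  cinder:
    volume:
      storage:
        engine: hitachi_vsp
        version: "1.0"
      types:
      - name: hitachi
        backend: hitachi_vsp
#}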
{%- if volume.storage.engine == "ceph" %}

{%- for type in volume.get('types', []) %}

[{{ type.backend }}]
volume_backend_name={{ type.backend }}
volume_driver = cinder.volume.drivers.rbd.RBDDriver

#
# Options defined in cinder.volume.drivers.rbd
#

# The RADOS pool where rbd volumes are stored (string value)
#rbd_pool=volumes
rbd_pool={{ type.pool }}

# The RADOS client name for accessing rbd volumes - only set
# when using cephx authentication (string value)
#rbd_user=cinder
rbd_user={{ volume.storage.user }}

# Path to the ceph configuration file (string value)
#rbd_ceph_conf=
rbd_ceph_conf=/etc/ceph/ceph.conf

# Flatten volumes created from snapshots to remove dependency
# from volume to snapshot (boolean value)
#rbd_flatten_volume_from_snapshot=false

# The libvirt uuid of the secret for the rbd_user volumes
# (string value)
#rbd_secret_uuid=da74ccb7-aa59-1721-a172-0006b1aa4e3e
rbd_secret_uuid={{ volume.storage.secret_uuid }}

# Directory where temporary image files are stored when the
# volume driver does not write them directly to the volume.
# (string value)
#volume_tmp_dir=<None>

# Maximum number of nested volume clones that are taken before
# a flatten occurs. Set to 0 to disable cloning. (integer value)
#rbd_max_clone_depth=5

# Volumes will be chunked into objects of this size (in
# megabytes). (integer value)
#rbd_store_chunk_size=4

# Timeout value (in seconds) used when connecting to the ceph
# cluster. If value < 0, no timeout is set and the default
# librados value is used. (integer value)
#rados_connect_timeout=-1

{%- endfor %}

{%- endif %}
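{#
  Illustrative pillar for the ceph branch above (a sketch; key names follow
  the lookups above, values are placeholders — the secret_uuid mirrors the
  commented example):

  cinder:
    volume:
      storage:
        engine: ceph
        user: cinder
        secret_uuid: da74ccb7-aa59-1721-a172-0006b1aa4e3e
      types:
      - name: ceph
        backend: ceph_backend
        pool: volumes
#}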
{%- if volume.storage.engine == "hp3par" %}

{%- for type in volume.get('types', []) %}

[{{ type.backend }}]

hp3par_api_url={{ volume.storage.url }}

# 3PAR super user username
hp3par_username={{ volume.storage.user }}

# 3PAR super user password
hp3par_password={{ volume.storage.password }}

# 3PAR CPG to use for volume creation
hp3par_cpg={{ volume.storage.cpg }}

# IP address of the SAN controller for SSH access to the array
san_ip={{ volume.storage.host }}

# Username for the SAN controller for SSH access to the array
san_login={{ volume.storage.login }}

# Password for the SAN controller for SSH access to the array
san_password={{ volume.storage.password }}

# FIBRE CHANNEL (the next line enables the FC driver)
volume_driver=cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver

# iSCSI (uncomment the next line to enable the iSCSI driver instead, and
# set hp3par_iscsi_ips or iscsi_ip_address)
#volume_driver=cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver

# iSCSI multiple port configuration
#hp3par_iscsi_ips=10.10.220.253:3261,10.10.222.234
#hp3par_iscsi_ips=10.10.103.151

# Still available for single port iSCSI configuration
#iscsi_ip_address=10.10.103.151

## OPTIONAL SETTINGS
# Enable HTTP debugging to 3PAR
#hp3par_debug=True
hp3par_debug={{ volume.storage.debug }}

# Enable CHAP authentication for iSCSI connections.
hp3par_iscsi_chap_enabled=false

# The CPG to use for snapshots of volumes. If empty, hp3par_cpg will be used.
hp3par_snap_cpg={{ volume.storage.snapcpg }}

# Time in hours to retain a snapshot. You can't delete it before this expires.
hp3par_snapshot_retention=2

# Time in hours when a snapshot expires and is deleted. This must be larger
# than retention.
hp3par_snapshot_expiration=4

{%- endfor %}

{%- endif %}
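{#
  Illustrative pillar for the hp3par branch above (a sketch; key names
  follow the lookups above, values are placeholders):

  cinder:
    volume:
      storage:
        engine: hp3par
        url: https://10.10.103.151:8080/api/v1
        user: 3paradm
        password: secret
        cpg: OpenStackCPG
        snapcpg: OpenStackSNAPCPG
        host: 10.10.103.151
        login: 3parssh
        debug: false
      types:
      - name: hp3par
        backend: hp3par_backend
#}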
{%- if volume.storage.engine == "openvstorage" %}

{%- for vpool in pillar.openvstorage.server.get('vpools', []) %}

[{{ vpool }}]
volume_driver = cinder.volume.drivers.openvstorage.OVSVolumeDriver
volume_backend_name={{ vpool }}
vpool_name={{ vpool }}

{%- endfor %}

{%- endif %}
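{#
  Illustrative pillar for the openvstorage branch above (a sketch; note the
  vpool list is read from the openvstorage pillar, not from cinder — names
  are placeholders):

  openvstorage:
    server:
      vpools:
      - vpool01
      - vpool02
#}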
{%- if volume.storage.engine == "fujitsu" %}

{%- for type in volume.get('types', []) %}

[{{ type.backend }}]
volume_backend_name={{ type.backend }}
volume_driver=cinder.volume.drivers.fujitsu.fujitsu_eternus_dx_fc.FJDXFCDriver
cinder_eternus_config_file=/etc/cinder/cinder_fujitsu_eternus_dx_{{ type.backend }}.xml

{%- endfor %}

{%- endif %}
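{#
  Illustrative pillar for the fujitsu branch above (a sketch; each backend
  expects its driver XML at the path rendered above — names are
  placeholders):

  cinder:
    volume:
      storage:
        engine: fujitsu
      types:
      - name: eternus
        backend: 10kThinPro
#}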
{%- if volume.storage.engine == "gpfs" %}

{%- for type in volume.get('types', []) %}

[{{ type.backend }}]
volume_backend_name={{ type.backend }}
volume_driver = cinder.volume.drivers.ibm.gpfs.GPFSDriver
gpfs_mount_point_base={{ type.mount_point }}
#gpfs_mount_point_base=/mnt/gpfs-openstack/cinder/gold
gpfs_max_clone_depth=3
gpfs_sparse_volumes=true
gpfs_storage_pool=system

{%- endfor %}

{%- endif %}
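{#
  Illustrative pillar for the gpfs branch above (a sketch; key names follow
  the lookups above, the mount point mirrors the commented example):

  cinder:
    volume:
      storage:
        engine: gpfs
      types:
      - name: gold
        backend: GPFS-GOLD
        mount_point: /mnt/gpfs-openstack/cinder/gold
#}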
{%- endif %}