Add support for multiple Ceph configuration files

This patch adds an option to copy separate Ceph
configuration files and corresponding keyrings for the
cinder, glance, manila, gnocchi and nova services.

This is especially useful when the deployment uses availability
zones, for example:

  - Each compute node can read/write to the Ceph
    cluster in its own AZ.
  - Cinder can write to several Ceph clusters in several AZs.
  - Glance can use multistore and upload images to
    several Ceph clusters in several AZs at once.
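A minimal sketch of the kind of configuration this
enables (the cluster names and availability zones below
are illustrative only):

  cinder_ceph_backends:
    - name: "rbd-az1"
      cluster: "ceph-az1"
      availability_zone: "az1"
      enabled: "{{ cinder_backend_ceph | bool }}"
    - name: "rbd-az2"
      cluster: "ceph-az2"
      availability_zone: "az2"
      enabled: "{{ cinder_backend_ceph | bool }}"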

Change-Id: Ie4d8ab5a3df748137835cae1c943b9180cd10eb1
Michal Arbet 2023-03-13 08:13:57 +00:00
parent 3b24d77c48
commit fdf2385f14
30 changed files with 618 additions and 285 deletions

View File

@ -1086,11 +1086,11 @@ ceph_manila_user: "manila"
ceph_nova_user: "{{ ceph_cinder_user }}"
# External Ceph keyrings
ceph_cinder_keyring: "ceph.client.cinder.keyring"
ceph_cinder_backup_keyring: "ceph.client.cinder-backup.keyring"
ceph_glance_keyring: "ceph.client.glance.keyring"
ceph_gnocchi_keyring: "ceph.client.gnocchi.keyring"
ceph_manila_keyring: "ceph.client.manila.keyring"
ceph_cinder_keyring: "client.{{ ceph_cinder_user }}.keyring"
ceph_cinder_backup_keyring: "client.{{ ceph_cinder_backup_user }}.keyring"
ceph_glance_keyring: "client.{{ ceph_glance_user }}.keyring"
ceph_gnocchi_keyring: "client.{{ ceph_gnocchi_user }}.keyring"
ceph_manila_keyring: "client.{{ ceph_manila_user }}.keyring"
ceph_nova_keyring: "{{ ceph_cinder_keyring }}"
#####################

View File

@ -234,8 +234,6 @@ cinder_api_workers: "{{ openstack_service_workers }}"
# Cinder
####################
cinder_backends:
- name: "{{ cinder_backend_ceph_name }}"
enabled: "{{ cinder_backend_ceph | bool }}"
- name: "{{ cinder_backend_lvm_name }}"
enabled: "{{ enable_cinder_backend_lvm | bool }}"
- name: "{{ cinder_backend_nfs_name }}"
@ -266,9 +264,17 @@ cinder_backend_pure_iscsi_name: "Pure-FlashArray-iscsi"
cinder_backend_pure_fc_name: "Pure-FlashArray-fc"
cinder_backend_pure_roce_name: "Pure-FlashArray-roce"
cinder_ceph_backends:
- name: "{{ cinder_backend_ceph_name }}"
cluster: "ceph"
enabled: "{{ cinder_backend_ceph | bool }}"
cinder_backup_backend_ceph_name: "rbd-1"
cinder_backup_ceph_backend: "{{ cinder_ceph_backends | selectattr('name', 'equalto', cinder_backup_backend_ceph_name) | list | first }}"
skip_cinder_backend_check: False
cinder_enabled_backends: "{{ cinder_backends | selectattr('enabled', 'equalto', true) | list }}"
cinder_enabled_backends: "{{ cinder_backends | selectattr('enabled', 'equalto', true) | list + cinder_ceph_backends | selectattr('enabled', 'equalto', true) | list }}"
####################
# Notification

View File

@ -1,30 +1,55 @@
---
- name: Copying over ceph.conf for Cinder
- name: Ensuring cinder service ceph config subdirs exists
vars:
service: "{{ cinder_services[item] }}"
file:
path: "{{ node_config_directory }}/{{ item }}/ceph"
state: "directory"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
become: true
when:
- inventory_hostname in groups[service.group]
- service.enabled | bool
with_items:
- "cinder-volume"
- "cinder-backup"
- name: Copying over multiple ceph.conf for cinder services
vars:
services_need_config:
- "cinder-volume"
- "cinder-backup"
service_name: "{{ item.0.key }}"
service: "{{ item.0.value }}"
cluster: "{{ item.1.cluster }}"
merge_configs:
sources:
- "{{ node_custom_config }}/cinder/ceph.conf"
- "{{ node_custom_config }}/cinder/{{ item.key }}/ceph.conf"
dest: "{{ node_config_directory }}/{{ item.key }}/ceph.conf"
- "{{ node_custom_config }}/cinder/{{ cluster }}.conf"
- "{{ node_custom_config }}/cinder/{{ service_name }}/{{ cluster }}.conf"
dest: "{{ node_config_directory }}/{{ service_name }}/ceph/{{ cluster }}.conf"
mode: "0660"
become: true
when:
- item.value.enabled | bool
- inventory_hostname in groups[item.value.group]
- item.key in services_need_config
with_dict: "{{ cinder_services }}"
- service.enabled | bool
- inventory_hostname in groups[service.group]
- service_name in services_need_config
with_nested:
- "{{ cinder_services | dict2items }}"
- "{{ cinder_ceph_backends }}"
notify:
- Restart {{ item.key }} container
- "Restart {{ service_name }} container"
- name: Copy over Ceph keyring files for cinder-volume
vars:
keyring: "{{ item.cluster }}.{{ ceph_cinder_keyring }}"
template:
src: "{{ node_custom_config }}/cinder/cinder-volume/{{ ceph_cinder_keyring }}"
dest: "{{ node_config_directory }}/cinder-volume/"
src: "{{ node_custom_config }}/cinder/cinder-volume/{{ keyring }}"
dest: "{{ node_config_directory }}/cinder-volume/ceph/{{ keyring }}"
mode: "0660"
become: true
with_items: "{{ cinder_ceph_backends }}"
when:
- external_ceph_cephx_enabled | bool
- inventory_hostname in groups['cinder-volume']
@ -34,14 +59,14 @@
- name: Copy over Ceph keyring files for cinder-backup
template:
src: "{{ node_custom_config }}/cinder/{{ item }}"
dest: "{{ node_config_directory }}/cinder-backup/"
src: "{{ node_custom_config }}/cinder/cinder-backup/{{ item }}"
dest: "{{ node_config_directory }}/cinder-backup/ceph/{{ item }}"
mode: "0660"
become: true
register: cinder_backup_ceph_keyring
with_items:
- "cinder-backup/{{ ceph_cinder_keyring }}"
- "cinder-backup/{{ ceph_cinder_backup_keyring }}"
- "{{ cinder_backup_ceph_backend.cluster }}.{{ ceph_cinder_keyring }}"
- "{{ cinder_backup_ceph_backend.cluster }}.{{ ceph_cinder_backup_keyring }}"
when:
- external_ceph_cephx_enabled | bool
- inventory_hostname in groups['cinder-backup']

View File

@ -14,25 +14,10 @@
"perm": "0600"
}{% endif %}{% if cinder_backend_ceph | bool %},
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"source": "{{ container_config_directory }}/ceph",
"dest": "/etc/ceph",
"owner": "cinder",
"perm": "0600",
"optional": {{ (not cinder_backend_ceph | bool) | string | lower }}
},
{
"source": "{{ container_config_directory }}/{{ ceph_cinder_keyring }}",
"dest": "/etc/ceph/{{ ceph_cinder_keyring }}",
"owner": "cinder",
"perm": "0600",
"optional": {{ (not cinder_backend_ceph | bool) | string | lower }}
},
{
"source": "{{ container_config_directory }}/{{ ceph_cinder_backup_keyring }}",
"dest": "/etc/ceph/{{ ceph_cinder_backup_keyring }}",
"owner": "cinder",
"perm": "0600",
"optional": {{ (not cinder_backend_ceph | bool) | string | lower }}
"perm": "0600"
}{% endif %}
],
"permissions": [

View File

@ -6,21 +6,13 @@
"dest": "/etc/cinder/cinder.conf",
"owner": "cinder",
"perm": "0600"
},
}{% if cinder_backend_ceph | bool %},
{
"source": "{{ container_config_directory }}/{{ ceph_cinder_keyring }}",
"dest": "/etc/ceph/{{ ceph_cinder_keyring }}",
"source": "{{ container_config_directory }}/ceph",
"dest": "/etc/ceph",
"owner": "cinder",
"perm": "0600",
"optional": {{ (not cinder_backend_ceph | bool) | string | lower }}
},
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "cinder",
"perm": "0600",
"optional": {{ (not cinder_backend_ceph | bool) | string | lower }}
},
"perm": "0600"
}{% endif %},
{
"source": "{{ container_config_directory }}/nfs_shares",
"dest": "/etc/cinder/nfs_shares",

View File

@ -27,7 +27,7 @@ enabled_backends = {{ cinder_enabled_backends|map(attribute='name')|join(',') }}
{% if service_name == "cinder-backup" and enable_cinder_backup | bool %}
{% if cinder_backup_driver == "ceph" %}
backup_driver = cinder.backup.drivers.ceph.CephBackupDriver
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_conf = /etc/ceph/{{ cinder_backup_ceph_backend.cluster }}.conf
backup_ceph_user = {{ ceph_cinder_backup_user }}
backup_ceph_chunk_size = 134217728
backup_ceph_pool = {{ ceph_cinder_backup_pool_name }}
@ -132,15 +132,22 @@ target_protocol = iscsi
{% endif %}
{% if cinder_backend_ceph | bool %}
[{{ cinder_backend_ceph_name }}]
{% for backend in cinder_ceph_backends %}
[{{ backend.name }}]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = {{ cinder_backend_ceph_name }}
volume_backend_name = {{ backend.name }}
rbd_pool = {{ ceph_cinder_pool_name }}
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_ceph_conf = /etc/ceph/{{ backend.cluster }}.conf
rados_connect_timeout = 5
rbd_user = {{ ceph_cinder_user }}
rbd_cluster_name = {{ backend.cluster }}
rbd_keyring_conf = /etc/ceph/{{ backend.cluster }}.{{ ceph_cinder_keyring }}
rbd_secret_uuid = {{ cinder_rbd_secret_uuid }}
report_discard_supported = True
{% if backend.availability_zone is defined %}
backend_availability_zone = {{ backend.availability_zone }}
{% endif %}
{% endfor %}
{% endif %}
{% if enable_cinder_backend_nfs | bool %}

View File

@ -213,9 +213,6 @@ glance_backends:
- name: http
type: http
enabled: true
- name: rbd
type: rbd
enabled: "{{ glance_backend_ceph | bool }}"
- name: vmware
type: vmware
enabled: "{{ glance_backend_vmware | bool }}"
@ -226,7 +223,13 @@ glance_backends:
type: swift
enabled: "{{ glance_backend_swift | bool }}"
glance_store_backends: "{{ glance_backends | selectattr('enabled', 'equalto', true) | list }}"
glance_ceph_backends:
- name: "rbd"
type: "rbd"
cluster: "ceph"
enabled: "{{ glance_backend_ceph | bool }}"
glance_store_backends: "{{ glance_backends | selectattr('enabled', 'equalto', true) | list + glance_ceph_backends | selectattr('enabled', 'equalto', true) | list }}"
####################
# OpenStack

View File

@ -1,20 +1,38 @@
---
- name: Copy over ceph.conf for Glance
template:
src: "{{ node_custom_config }}/glance/ceph.conf"
dest: "{{ node_config_directory }}/glance-api/ceph.conf"
- name: Ensuring glance service ceph config subdir exists
vars:
service: "{{ glance_services['glance-api'] }}"
file:
path: "{{ node_config_directory }}/glance-api/ceph"
state: "directory"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
become: true
when:
- inventory_hostname in groups[service.group]
- service.enabled | bool
- name: Copy over multiple ceph configs for Glance
merge_configs:
sources:
- "{{ node_custom_config }}/glance/{{ item.cluster }}.conf"
- "{{ node_custom_config }}/glance/glance-api/{{ item.cluster }}.conf"
dest: "{{ node_config_directory }}/glance-api/ceph/{{ item.cluster }}.conf"
mode: "0660"
become: true
when: inventory_hostname in groups['glance-api']
with_items: "{{ glance_ceph_backends }}"
notify:
- Restart glance-api container
- name: Copy over ceph Glance keyring
- name: Copy over ceph Glance keyrings
template:
src: "{{ node_custom_config }}/glance/{{ ceph_glance_keyring }}"
dest: "{{ node_config_directory }}/glance-api/{{ ceph_glance_keyring }}"
src: "{{ node_custom_config }}/glance/{{ item.cluster }}.{{ ceph_glance_keyring }}"
dest: "{{ node_config_directory }}/glance-api/ceph/{{ item.cluster }}.{{ ceph_glance_keyring }}"
mode: "0660"
become: true
with_items: "{{ glance_ceph_backends }}"
when: inventory_hostname in groups['glance-api']
notify:
- Restart glance-api container

View File

@ -59,7 +59,7 @@ flavor = {% if enable_glance_image_cache | bool %}keystone+cachemanagement{% els
{% if glance_backend_vmware | bool %}
default_backend = vmware
{% elif glance_backend_ceph | bool %}
default_backend = rbd
default_backend = "{{ glance_ceph_backends[0].name }}"
{% elif glance_backend_swift | bool %}
default_backend = swift
{% else %}
@ -72,9 +72,12 @@ filesystem_store_datadir = /var/lib/glance/images/
{% endif %}
{% if glance_backend_ceph | bool %}
[rbd]
{% for backend in glance_ceph_backends %}
[{{ backend.name }}]
rbd_store_user = {{ ceph_glance_user }}
rbd_store_pool = {{ ceph_glance_pool_name }}
rbd_store_ceph_conf = /etc/ceph/{{ backend.cluster }}.conf
{% endfor %}
{% endif %}
{% if glance_backend_swift | bool %}

View File

@ -14,14 +14,8 @@
"perm": "0600"
}{% endif %}{% if glance_backend_ceph | bool %},
{
"source": "{{ container_config_directory }}/{{ ceph_glance_keyring }}",
"dest": "/etc/ceph/{{ ceph_glance_keyring }}",
"owner": "glance",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"source": "{{ container_config_directory }}/ceph",
"dest": "/etc/ceph",
"owner": "glance",
"perm": "0600"
}{% endif %}{% if glance_backend_swift | bool %},

View File

@ -189,3 +189,5 @@ gnocchi_ks_users:
user: "{{ gnocchi_keystone_user }}"
password: "{{ gnocchi_keystone_password }}"
role: "admin"
gnocchi_ceph_cluster: "ceph"

View File

@ -1,8 +1,23 @@
---
- name: Copy over ceph.conf file
template:
src: "{{ node_custom_config }}/gnocchi/ceph.conf"
dest: "{{ node_config_directory }}/{{ item.key }}/ceph.conf"
- name: Ensuring gnocchi service ceph config subdir exists
file:
path: "{{ node_config_directory }}/{{ item.key }}/ceph"
state: "directory"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
become: true
with_dict: "{{ gnocchi_services }}"
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
- name: Copy over ceph config for Gnocchi
merge_configs:
sources:
- "{{ node_custom_config }}/gnocchi/{{ gnocchi_ceph_cluster }}.conf"
- "{{ node_custom_config }}/gnocchi/{{ item.key }}/{{ gnocchi_ceph_cluster }}.conf"
dest: "{{ node_config_directory }}/{{ item.key }}/ceph/{{ gnocchi_ceph_cluster }}.conf"
mode: "0660"
become: true
when:
@ -12,16 +27,16 @@
notify:
- Restart {{ item.key }} container
- name: Copy over ceph gnocchi keyring
- name: Copy over ceph Gnocchi keyrings
template:
src: "{{ node_custom_config }}/gnocchi/{{ ceph_gnocchi_keyring }}"
dest: "{{ node_config_directory }}/{{ item.key }}/{{ ceph_gnocchi_keyring }}"
src: "{{ node_custom_config }}/gnocchi/{{ gnocchi_ceph_cluster }}.{{ ceph_gnocchi_keyring }}"
dest: "{{ node_config_directory }}/{{ item.key }}/ceph/{{ gnocchi_ceph_cluster }}.{{ ceph_gnocchi_keyring }}"
mode: "0660"
become: true
with_dict: "{{ gnocchi_services }}"
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ gnocchi_services }}"
notify:
- Restart {{ item.key }} container

View File

@ -22,14 +22,8 @@
"perm": "0600"
}{% endif %}{% if gnocchi_backend_storage == 'ceph' %},
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "gnocchi",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/{{ ceph_gnocchi_keyring }}",
"dest": "/etc/ceph/{{ ceph_gnocchi_keyring }}",
"source": "{{ container_config_directory }}/ceph",
"dest": "/etc/ceph",
"owner": "gnocchi",
"perm": "0600"
}{% endif %}

View File

@ -14,14 +14,8 @@
"perm": "0600"
}{% endif %}{% if gnocchi_backend_storage == 'ceph' %},
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "gnocchi",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/{{ ceph_gnocchi_keyring }}",
"dest": "/etc/ceph/{{ ceph_gnocchi_keyring }}",
"source": "{{ container_config_directory }}/ceph",
"dest": "/etc/ceph",
"owner": "gnocchi",
"perm": "0600"
}{% endif %}

View File

@ -14,14 +14,8 @@
"perm": "0600"
}{% endif %}{% if gnocchi_backend_storage == 'ceph' %},
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "gnocchi",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/{{ ceph_gnocchi_keyring }}",
"dest": "/etc/ceph/{{ ceph_gnocchi_keyring }}",
"source": "{{ container_config_directory }}/ceph",
"dest": "/etc/ceph",
"owner": "gnocchi",
"perm": "0600"
}{% endif %}

View File

@ -79,8 +79,8 @@ file_basepath = /var/lib/gnocchi
driver = ceph
ceph_pool = {{ ceph_gnocchi_pool_name }}
ceph_username = {{ ceph_gnocchi_user }}
ceph_keyring = /etc/ceph/{{ ceph_gnocchi_keyring }}
ceph_conffile = /etc/ceph/ceph.conf
ceph_keyring = /etc/ceph/{{ gnocchi_ceph_cluster }}.{{ ceph_gnocchi_keyring }}
ceph_conffile = /etc/ceph/{{ gnocchi_ceph_cluster }}.conf
{% elif gnocchi_backend_storage == 'swift' %}
driver = swift
swift_authurl = {{ keystone_internal_url }}

View File

@ -225,24 +225,30 @@ manila_backends:
protocols:
- "NFS"
- "CIFS"
- name: "cephfsnative1"
driver: "cephfsnative"
enabled: "{{ enable_manila_backend_cephfs_native | bool }}"
protocols:
- "CEPHFS"
- name: "cephfsnfs1"
driver: "cephfsnfs"
enabled: "{{ enable_manila_backend_cephfs_nfs | bool }}"
protocols:
- "NFS"
- "CIFS"
- name: "glusterfsnfs1"
driver: "glusterfsnfs"
enabled: "{{ enable_manila_backend_glusterfs_nfs | bool }}"
protocols:
- "NFS"
manila_enabled_backends: "{{ manila_backends | selectattr('enabled', 'equalto', true) | list }}"
manila_ceph_backends:
- name: "cephfsnative1"
share_name: "CEPHFS1"
driver: "cephfsnative"
cluster: "ceph"
enabled: "{{ enable_manila_backend_cephfs_native | bool }}"
protocols:
- "CEPHFS"
- name: "cephfsnfs1"
share_name: "CEPHFSNFS1"
driver: "cephfsnfs"
cluster: "ceph"
enabled: "{{ enable_manila_backend_cephfs_nfs | bool }}"
protocols:
- "NFS"
- "CIFS"
manila_enabled_backends: "{{ manila_backends | selectattr('enabled', 'equalto', true) | list + manila_ceph_backends | selectattr('enabled', 'equalto', true) | list }}"
####################

View File

@ -1,23 +1,43 @@
---
- name: Copying over ceph.conf for manila
template:
src: "{{ node_custom_config }}/manila/ceph.conf"
dest: "{{ node_config_directory }}/manila-share/ceph.conf"
- name: Ensuring manila service ceph config subdir exists
vars:
service: "{{ manila_services['manila-share'] }}"
file:
path: "{{ node_config_directory }}/manila-share/ceph"
state: "directory"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
become: true
when:
- inventory_hostname in groups[service.group]
- service.enabled | bool
- name: Copy over multiple ceph configs for Manila
merge_configs:
sources:
- "{{ node_custom_config }}/manila/{{ item.cluster }}.conf"
- "{{ node_custom_config }}/manila/manila-share/{{ item.cluster }}.conf"
dest: "{{ node_config_directory }}/manila-share/ceph/{{ item.cluster }}.conf"
mode: "0660"
become: true
when:
- inventory_hostname in groups['manila-share']
- item.enabled | bool
with_items: "{{ manila_ceph_backends }}"
notify:
- Restart manila-share container
- name: Copy over Ceph keyring files for manila
- name: Copy over ceph Manila keyrings
template:
src: "{{ node_custom_config }}/manila/{{ ceph_manila_keyring }}"
dest: "{{ node_config_directory }}/manila-share/{{ ceph_manila_keyring }}"
mode: "0600"
src: "{{ node_custom_config }}/manila/{{ item.cluster }}.{{ ceph_manila_keyring }}"
dest: "{{ node_config_directory }}/manila-share/ceph/{{ item.cluster }}.{{ ceph_manila_keyring }}"
mode: "0660"
become: true
with_items: "{{ manila_ceph_backends }}"
when:
- inventory_hostname in groups['manila-share']
- item.enabled | bool
notify:
- Restart manila-share container

View File

@ -106,33 +106,41 @@ hitachi_hnas_file_system_name = {{ hnas_file_system_name }}
{% endif %}
{% if enable_manila_backend_cephfs_native | bool %}
[cephfsnative1]
{% for backend in manila_ceph_backends %}
{% if backend.driver == 'cephfsnative' %}
[{{ backend.name }}]
driver_handles_share_servers = False
share_backend_name = CEPHFS1
share_backend_name = {{ backend.share_name }}
share_driver = manila.share.drivers.cephfs.driver.CephFSDriver
cephfs_conf_path = /etc/ceph/ceph.conf
cephfs_conf_path = /etc/ceph/{{ backend.cluster }}.conf
cephfs_auth_id = {{ ceph_manila_user }}
cephfs_cluster_name = ceph
cephfs_cluster_name = {{ backend.cluster }}
{% if manila_cephfs_filesystem_name | length %}
cephfs_filesystem_name = {{ manila_cephfs_filesystem_name }}
{% endif %}
{% endif %}
{% endfor %}
{% endif %}
{% if enable_manila_backend_cephfs_nfs | bool %}
[cephfsnfs1]
{% for backend in manila_ceph_backends %}
{% if backend.driver == 'cephfsnfs' %}
[{{ backend.name }}]
driver_handles_share_servers = False
share_backend_name = CEPHFSNFS1
share_backend_name = {{ backend.share_name }}
share_driver = manila.share.drivers.cephfs.driver.CephFSDriver
cephfs_protocol_helper_type = NFS
cephfs_conf_path = /etc/ceph/ceph.conf
cephfs_conf_path = /etc/ceph/{{ backend.cluster }}.conf
cephfs_auth_id = {{ ceph_manila_user }}
cephfs_cluster_name = ceph
cephfs_cluster_name = {{ backend.cluster }}
{% if manila_cephfs_filesystem_name | length %}
cephfs_filesystem_name = {{ manila_cephfs_filesystem_name }}
{% endif %}
cephfs_ganesha_server_is_remote= False
cephfs_ganesha_server_ip = {{ api_interface_address }}
{% endif %}
{% endfor %}
{% endif %}
{% if enable_manila_backend_glusterfs_nfs | bool %}
[glusterfsnfs1]

View File

@ -8,14 +8,8 @@
"perm": "0600"
}{% if enable_manila_backend_cephfs_native | bool or enable_manila_backend_cephfs_nfs | bool %},
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "manila",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/{{ ceph_manila_keyring }}",
"dest": "/etc/ceph/{{ ceph_manila_keyring }}",
"source": "{{ container_config_directory }}/ceph",
"dest": "/etc/ceph",
"owner": "manila",
"perm": "0600"
}{% endif %}{% if manila_policy_file is defined %},

View File

@ -13,7 +13,7 @@ osapi_share_workers = {{ manila_api_workers }}
rootwrap_config = /etc/manila/rootwrap.conf
api_paste_config = /etc/manila/api-paste.ini
enabled_share_protocols = "{{ manila_backends|selectattr('enabled', 'equalto', true)|sum(attribute='protocols', start=[]) | unique | join(',') }}"
enabled_share_protocols = "{{ manila_enabled_backends | sum(attribute='protocols', start=[]) | unique | join(',') }}"
auth_strategy = keystone

View File

@ -528,6 +528,8 @@ nova_notification_topics:
nova_enabled_notification_topics: "{{ nova_notification_topics | selectattr('enabled', 'equalto', true) | list }}"
nova_ceph_cluster: "ceph"
####################
# VMware
####################

View File

@ -1,9 +1,13 @@
---
- name: Check nova keyring file
vars:
keyring: "{{ nova_ceph_cluster }}.{{ ceph_nova_keyring }}"
paths:
- "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ keyring }}"
- "{{ node_custom_config }}/nova/{{ keyring }}"
stat:
path: "{{ node_custom_config }}/nova/{{ ceph_nova_keyring }}"
path: "{{ lookup('first_found', paths) }}"
delegate_to: localhost
run_once: True
register: nova_cephx_keyring_file
failed_when: not nova_cephx_keyring_file.stat.exists
when:
@ -11,10 +15,14 @@
- external_ceph_cephx_enabled | bool
- name: Check cinder keyring file
vars:
keyring: "{{ nova_ceph_cluster }}.{{ ceph_cinder_keyring }}"
paths:
- "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ keyring }}"
- "{{ node_custom_config }}/nova/{{ keyring }}"
stat:
path: "{{ node_custom_config }}/nova/{{ ceph_cinder_keyring }}"
path: "{{ lookup('first_found', paths) }}"
delegate_to: localhost
run_once: True
register: cinder_cephx_keyring_file
failed_when: not cinder_cephx_keyring_file.stat.exists
when:
@ -26,7 +34,6 @@
nova_cephx_raw_key:
"{{ lookup('template', nova_cephx_keyring_file.stat.path) | regex_search('key\\s*=.*$', multiline=True) | regex_replace('key\\s*=\\s*(.*)\\s*', '\\1') }}"
changed_when: false
run_once: True
when:
- nova_backend == "rbd"
- external_ceph_cephx_enabled | bool
@ -36,7 +43,6 @@
cinder_cephx_raw_key:
"{{ lookup('file', cinder_cephx_keyring_file.stat.path) | regex_search('key\\s*=.*$', multiline=True) | regex_replace('key\\s*=\\s*(.*)\\s*', '\\1') }}"
changed_when: false
run_once: True
when:
- cinder_backend_ceph | bool
- external_ceph_cephx_enabled | bool
@ -78,8 +84,11 @@
- name: Copy over ceph.conf
vars:
service: "{{ nova_cell_services[item] }}"
paths:
- "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ nova_ceph_cluster }}.conf"
- "{{ node_custom_config }}/nova/{{ nova_ceph_cluster }}.conf"
template:
src: "{{ node_custom_config }}/nova/ceph.conf"
src: "{{ lookup('first_found', paths) }}"
dest: "{{ node_config_directory }}/{{ item }}/"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
@ -97,6 +106,10 @@
- block:
- name: Ensure /etc/ceph directory exists (host libvirt)
vars:
paths:
- "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ nova_ceph_cluster }}.conf"
- "{{ node_custom_config }}/nova/{{ nova_ceph_cluster }}.conf"
file:
path: "/etc/ceph/"
state: "directory"
@ -106,9 +119,13 @@
become: true
- name: Copy over ceph.conf (host libvirt)
vars:
paths:
- "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ nova_ceph_cluster }}.conf"
- "{{ node_custom_config }}/nova/{{ nova_ceph_cluster }}.conf"
template:
src: "{{ node_custom_config }}/nova/ceph.conf"
dest: "/etc/ceph/ceph.conf"
src: "{{ lookup('first_found', paths) }}"
dest: "/etc/ceph/{{ nova_ceph_cluster }}.conf"
owner: "root"
group: "root"
mode: "0644"

View File

@ -14,14 +14,14 @@
"perm": "0600"
}{% endif %}{% if nova_backend == "rbd" %},
{
"source": "{{ container_config_directory }}/{{ ceph_nova_keyring }}",
"dest": "/etc/ceph/{{ ceph_nova_keyring }}",
"source": "{{ container_config_directory }}/ceph.{{ ceph_nova_keyring }}",
"dest": "/etc/ceph/ceph.{{ ceph_nova_keyring }}",
"owner": "nova",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"source": "{{ container_config_directory }}/{{ nova_ceph_cluster }}.conf",
"dest": "/etc/ceph/{{ nova_ceph_cluster }}.conf",
"owner": "nova",
"perm": "0600"
}{% endif %}{% if nova_compute_virt_type == "vmware" and not vmware_vcenter_insecure | bool %},

View File

@ -241,3 +241,5 @@ zun_ks_users:
user: "{{ zun_keystone_user }}"
password: "{{ zun_keystone_password }}"
role: "admin"
zun_ceph_cluster: "ceph"

View File

@ -1,7 +1,7 @@
---
- name: Copying over ceph.conf for Zun
copy:
src: "{{ node_custom_config }}/zun/zun-compute/ceph.conf"
src: "{{ node_custom_config }}/zun/zun-compute/{{ zun_ceph_cluster }}.conf"
dest: "{{ node_config_directory }}/zun-compute/"
mode: "0660"
become: true
@ -10,7 +10,7 @@
- name: Copy over Ceph keyring files for zun-compute
copy:
src: "{{ node_custom_config }}/zun/zun-compute/{{ ceph_cinder_keyring }}"
src: "{{ node_custom_config }}/zun/zun-compute/{{ zun_ceph_cluster }}.{{ ceph_cinder_keyring }}"
dest: "{{ node_config_directory }}/zun-compute/"
mode: "0660"
become: true

View File

@ -8,15 +8,15 @@
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/{{ ceph_cinder_keyring }}",
"dest": "/etc/ceph/{{ ceph_cinder_keyring }}",
"source": "{{ container_config_directory }}/{{ zun_ceph_cluster }}.{{ ceph_cinder_keyring }}",
"dest": "/etc/ceph/{{ zun_ceph_cluster }}.{{ ceph_cinder_keyring }}",
"owner": "zun",
"perm": "0600",
"optional": {{ (not zun_configure_for_cinder_ceph | bool) | string | lower }}
},
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"source": "{{ container_config_directory }}/{{ zun_ceph_cluster }}.conf",
"dest": "/etc/ceph/{{ zun_ceph_cluster }}.conf",
"owner": "zun",
"perm": "0600",
"optional": {{ (not zun_configure_for_cinder_ceph | bool) | string | lower }}

View File

@ -44,46 +44,98 @@ Glance
Ceph RBD can be used as a storage backend for Glance images. Configuring Glance
for Ceph includes the following steps:
#. Enable Glance Ceph backend in ``globals.yml``:
* Enable Glance Ceph backend in ``globals.yml``:
.. code-block:: yaml
.. code-block:: yaml
glance_backend_ceph: "yes"
glance_backend_ceph: "yes"
#. Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* ``ceph_glance_keyring`` (default: ``ceph.client.glance.keyring``)
* ``ceph_glance_user`` (default: ``glance``)
* ``ceph_glance_pool_name`` (default: ``images``)
* ``ceph_glance_keyring`` (default: ``client.glance.keyring``)
* ``ceph_glance_user`` (default: ``glance``)
* ``ceph_glance_pool_name`` (default: ``images``)
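For example, a minimal sketch of the matching ``globals.yml`` overrides
(the values shown are defaults and purely illustrative):

.. code-block:: yaml

   ceph_glance_user: "glance"
   ceph_glance_pool_name: "images"
   # with the new default pattern the keyring variable resolves to
   # "client.glance.keyring"; the file you provide is looked up with the
   # cluster name prefixed, e.g. "ceph.client.glance.keyring"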
#. Copy Ceph configuration file to ``/etc/kolla/config/glance/ceph.conf``
* Copy Ceph configuration file to ``/etc/kolla/config/glance/ceph.conf``
.. path /etc/kolla/config/glance/ceph.conf
.. code-block:: ini
.. path /etc/kolla/config/glance/ceph.conf
.. code-block:: ini
[global]
fsid = 1d89fec3-325a-4963-a950-c4afedd37fe3
mon_initial_members = ceph-0
mon_host = 192.168.0.56
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
[global]
fsid = 1d89fec3-325a-4963-a950-c4afedd37fe3
keyring = /etc/ceph/ceph.client.glance.keyring
mon_initial_members = ceph-0
mon_host = 192.168.0.56
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
#. Copy Ceph keyring to ``/etc/kolla/config/glance/<ceph_glance_keyring>``
* Copy Ceph keyring to ``/etc/kolla/config/glance/ceph.<ceph_glance_keyring>``
#. For copy-on-write set following in ``/etc/kolla/config/glance.conf``:
To configure multiple Ceph backends with Glance, which is useful
for multistore:
.. path /etc/kolla/config/glance.conf
.. code-block:: ini
* Copy the Ceph configuration files into ``/etc/kolla/config/glance/`` using
a different name for each cluster
[GLOBAL]
show_image_direct_url = True
``/etc/kolla/config/glance/ceph.conf``
.. path /etc/kolla/config/glance/ceph.conf
.. code-block:: ini
[global]
fsid = 1d89fec3-325a-4963-a950-c4afedd37fe3
keyring = /etc/ceph/ceph.client.glance.keyring
mon_initial_members = ceph-0
mon_host = 192.168.0.56
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
``/etc/kolla/config/glance/rbd1.conf``
.. path /etc/kolla/config/glance/rbd1.conf
.. code-block:: ini
[global]
fsid = dbfea068-89ca-4d04-bba0-1b8a56c3abc8
keyring = /etc/ceph/rbd1.client.glance.keyring
mon_initial_members = ceph-0
mon_host = 192.10.0.100
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
* Declare Ceph backends in ``globals.yml``
.. code-block:: yaml
glance_ceph_backends:
- name: "rbd"
type: "rbd"
cluster: "ceph"
enabled: "{{ glance_backend_ceph | bool }}"
- name: "another-rbd"
type: "rbd"
cluster: "rbd1"
enabled: "{{ glance_backend_ceph | bool }}"
* Copy Ceph keyring to ``/etc/kolla/config/glance/ceph.<ceph_glance_keyring>``
and analogously to ``/etc/kolla/config/glance/rbd1.<ceph_glance_keyring>``
* For copy-on-write, set the following in ``/etc/kolla/config/glance.conf``:
.. path /etc/kolla/config/glance.conf
.. code-block:: ini
[GLOBAL]
show_image_direct_url = True
.. warning::
``show_image_direct_url`` can present a security risk if using more
than just Ceph as Glance backend(s). Please see
:glance-doc:`Glance show_image_direct_url <configuration/glance_api.html#DEFAULT.show_image_direct_url>`
``show_image_direct_url`` can present a security risk if using more
than just Ceph as Glance backend(s). Please see
:glance-doc:`Glance show_image_direct_url <configuration/glance_api.html#DEFAULT.show_image_direct_url>`
Cinder
------
@ -91,76 +143,146 @@ Cinder
Ceph RBD can be used as a storage backend for Cinder volumes. Configuring
Cinder for Ceph includes the following steps:
#. When using external Ceph, there may be no nodes defined in the storage
group. This will cause Cinder and related services relying on this group to
fail. In this case, operator should add some nodes to the storage group,
all the nodes where ``cinder-volume`` and ``cinder-backup`` will run:
* When using external Ceph, there may be no nodes defined in the storage
group. This will cause Cinder and related services relying on this group to
fail. In this case, operator should add some nodes to the storage group,
all the nodes where ``cinder-volume`` and ``cinder-backup`` will run:
.. code-block:: ini
.. code-block:: ini
[storage]
control01
[storage]
control01
#. Enable Cinder Ceph backend in ``globals.yml``:
* Enable Cinder Ceph backend in ``globals.yml``:
.. code-block:: yaml
.. code-block:: yaml
cinder_backend_ceph: "yes"
cinder_backend_ceph: "yes"
#. Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* ``ceph_cinder_keyring`` (default: ``ceph.client.cinder.keyring``)
* ``ceph_cinder_user`` (default: ``cinder``)
* ``ceph_cinder_pool_name`` (default: ``volumes``)
* ``ceph_cinder_backup_keyring``
(default: ``ceph.client.cinder-backup.keyring``)
* ``ceph_cinder_backup_user`` (default: ``cinder-backup``)
* ``ceph_cinder_backup_pool_name`` (default: ``backups``)
* ``ceph_cinder_keyring`` (default: ``client.cinder.keyring``)
* ``ceph_cinder_user`` (default: ``cinder``)
* ``ceph_cinder_pool_name`` (default: ``volumes``)
* ``ceph_cinder_backup_keyring``
(default: ``client.cinder-backup.keyring``)
* ``ceph_cinder_backup_user`` (default: ``cinder-backup``)
* ``ceph_cinder_backup_pool_name`` (default: ``backups``)
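For example, a sketch of the corresponding ``globals.yml`` overrides,
shown here with their default values for illustration:

.. code-block:: yaml

   ceph_cinder_user: "cinder"
   ceph_cinder_pool_name: "volumes"
   ceph_cinder_backup_user: "cinder-backup"
   ceph_cinder_backup_pool_name: "backups"
   # keyrings default to "client.<user>.keyring"; the files you provide
   # are prefixed with the cluster name, e.g. "ceph.client.cinder.keyring"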
#. Copy Ceph configuration file to ``/etc/kolla/config/cinder/ceph.conf``
* Copy Ceph configuration file to ``/etc/kolla/config/cinder/ceph.conf``
Separate configuration options can be configured for
cinder-volume and cinder-backup by adding ceph.conf files to
``/etc/kolla/config/cinder/cinder-volume`` and
``/etc/kolla/config/cinder/cinder-backup`` respectively. They
will be merged with ``/etc/kolla/config/cinder/ceph.conf``.
Separate configuration options can be configured for
cinder-volume and cinder-backup by adding ceph.conf files to
``/etc/kolla/config/cinder/cinder-volume`` and
``/etc/kolla/config/cinder/cinder-backup`` respectively. They
will be merged with ``/etc/kolla/config/cinder/ceph.conf``.
#. Copy Ceph keyring files to:
* Copy Ceph keyring files to:
* ``/etc/kolla/config/cinder/cinder-volume/<ceph_cinder_keyring>``
* ``/etc/kolla/config/cinder/cinder-backup/<ceph_cinder_keyring>``
* ``/etc/kolla/config/cinder/cinder-backup/<ceph_cinder_backup_keyring>``
* ``/etc/kolla/config/cinder/cinder-volume/ceph.<ceph_cinder_keyring>``
* ``/etc/kolla/config/cinder/cinder-backup/ceph.<ceph_cinder_keyring>``
* ``/etc/kolla/config/cinder/cinder-backup/ceph.<ceph_cinder_backup_keyring>``
.. note::
``cinder-backup`` requires two keyrings for accessing volumes
and backup pool.
``cinder-backup`` requires two keyrings for accessing volumes
and backup pool.
To configure multiple Ceph backends with Cinder, which is useful when
using availability zones:
* Copy the Ceph configuration files into ``/etc/kolla/config/cinder/`` using
a different name for each cluster
``/etc/kolla/config/cinder/ceph.conf``
.. path /etc/kolla/config/cinder/ceph.conf
.. code-block:: ini
[global]
fsid = 1d89fec3-325a-4963-a950-c4afedd37fe3
mon_initial_members = ceph-0
mon_host = 192.168.0.56
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
``/etc/kolla/config/cinder/rbd2.conf``
.. path /etc/kolla/config/cinder/rbd2.conf
.. code-block:: ini
[global]
fsid = dbfea068-89ca-4d04-bba0-1b8a56c3abc8
mon_initial_members = ceph-0
mon_host = 192.10.0.100
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
* Declare Ceph backends in ``globals.yml``
.. code-block:: yaml
cinder_ceph_backends:
- name: "rbd-1"
cluster: "ceph"
enabled: "{{ cinder_backend_ceph | bool }}"
- name: "rbd-2"
cluster: "rbd2"
availability_zone: "az2"
enabled: "{{ cinder_backend_ceph | bool }}"
* Copy Ceph keyring files for all Ceph backends:
* ``/etc/kolla/config/cinder/cinder-volume/ceph.<ceph_cinder_keyring>``
* ``/etc/kolla/config/cinder/cinder-backup/ceph.<ceph_cinder_keyring>``
* ``/etc/kolla/config/cinder/cinder-backup/ceph.<ceph_cinder_backup_keyring>``
* ``/etc/kolla/config/cinder/cinder-volume/rbd2.<ceph_cinder_keyring>``
* ``/etc/kolla/config/cinder/cinder-backup/rbd2.<ceph_cinder_keyring>``
* ``/etc/kolla/config/cinder/cinder-backup/rbd2.<ceph_cinder_backup_keyring>``
.. note::
``cinder-backup`` requires two keyrings for accessing volumes
and backup pool.
Nova must also be configured to allow access to Cinder volumes:
#. Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* ``ceph_cinder_keyring`` (default: ``ceph.client.cinder.keyring``)
* ``ceph_cinder_keyring`` (default: ``client.cinder.keyring``)
#. Copy Ceph keyring file(s) to:
* Copy Ceph keyring file(s) to:
* ``/etc/kolla/config/nova/<ceph_cinder_keyring>``
* ``/etc/kolla/config/nova/ceph.<ceph_cinder_keyring>``
To configure a different Ceph backend per nova-compute host, which
is useful when using availability zones:
* Copy Ceph keyring file to:
* ``/etc/kolla/config/nova/<hostname>/ceph.<ceph_cinder_keyring>``
If ``zun`` is enabled and you wish to use Cinder volumes with Zun,
it must also be configured to allow access to Cinder volumes:
#. Enable Cinder Ceph backend for Zun in ``globals.yml``:
* Enable Cinder Ceph backend for Zun in ``globals.yml``:
.. code-block:: yaml
.. code-block:: yaml
zun_configure_for_cinder_ceph: "yes"
zun_configure_for_cinder_ceph: "yes"
#. Copy Ceph configuration file to:
* ``/etc/kolla/config/zun/zun-compute/ceph.conf``
* Copy Ceph configuration file to:
#. Copy Ceph keyring file(s) to:
* ``/etc/kolla/config/zun/zun-compute/ceph.conf``
* ``/etc/kolla/config/zun/zun-compute/<ceph_cinder_keyring>``
* Copy Ceph keyring file(s) to:
* ``/etc/kolla/config/zun/zun-compute/ceph.<ceph_cinder_keyring>``
Nova
@ -173,29 +295,38 @@ not need to be copied between hypervisors.
Configuring Nova for Ceph includes the following steps:
#. Enable Nova Ceph backend in ``globals.yml``:
* Enable Nova Ceph backend in ``globals.yml``:
.. code-block:: yaml
.. code-block:: yaml
nova_backend_ceph: "yes"
nova_backend_ceph: "yes"
#. Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* ``ceph_nova_keyring`` (by default it's the same as
``ceph_cinder_keyring``)
* ``ceph_nova_user`` (by default it's the same as ``ceph_cinder_user``)
* ``ceph_nova_pool_name`` (default: ``vms``)
* ``ceph_nova_keyring`` (by default it's the same as
``ceph_cinder_keyring``)
* ``ceph_nova_user`` (by default it's the same as ``ceph_cinder_user``)
* ``ceph_nova_pool_name`` (default: ``vms``)
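For example, if your Ceph deployment tool generates a separate key for
Nova, a sketch of the ``globals.yml`` overrides (the names are
illustrative):

.. code-block:: yaml

   ceph_nova_user: "nova"
   ceph_nova_keyring: "client.nova.keyring"
   ceph_nova_pool_name: "vms"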
#. Copy Ceph configuration file to ``/etc/kolla/config/nova/ceph.conf``
#. Copy Ceph keyring file(s) to:
* Copy Ceph configuration file to ``/etc/kolla/config/nova/ceph.conf``
* Copy Ceph keyring file(s) to:
* ``/etc/kolla/config/nova/<ceph_nova_keyring>``
* ``/etc/kolla/config/nova/ceph.<ceph_nova_keyring>``
.. note::
.. note::
If you are using a Ceph deployment tool that generates separate Ceph
keys for Cinder and Nova, you will need to override
``ceph_nova_keyring`` and ``ceph_nova_user`` to match.
If you are using a Ceph deployment tool that generates separate Ceph
keys for Cinder and Nova, you will need to override
``ceph_nova_keyring`` and ``ceph_nova_user`` to match.
To configure a different Ceph backend per nova-compute host, which
is useful when using availability zones:
* Copy Ceph configuration file to
``/etc/kolla/config/nova/<hostname>/ceph.conf``
* Copy Ceph keyring file(s) to:
* ``/etc/kolla/config/nova/<hostname>/ceph.<ceph_nova_keyring>``
Gnocchi
-------
@ -203,21 +334,25 @@ Gnocchi
Ceph object storage can be used as a storage backend for Gnocchi metrics.
Configuring Gnocchi for Ceph includes the following steps:
#. Enable Gnocchi Ceph backend in ``globals.yml``:
* Enable Gnocchi Ceph backend in ``globals.yml``:
.. code-block:: yaml
.. code-block:: yaml
gnocchi_backend_storage: "ceph"
gnocchi_backend_storage: "ceph"
#. Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* ``ceph_gnocchi_keyring``
(default: ``ceph.client.gnocchi.keyring``)
* ``ceph_gnocchi_user`` (default: ``gnocchi``)
* ``ceph_gnocchi_pool_name`` (default: ``gnocchi``)
* ``ceph_gnocchi_keyring``
(default: ``client.gnocchi.keyring``)
* ``ceph_gnocchi_user`` (default: ``gnocchi``)
* ``ceph_gnocchi_pool_name`` (default: ``gnocchi``)
* ``ceph_gnocchi_conf``
(default: ``ceph.conf``)
#. Copy Ceph configuration file to ``/etc/kolla/config/gnocchi/ceph.conf``
#. Copy Ceph keyring to ``/etc/kolla/config/gnocchi/<ceph_gnocchi_keyring>``
* Copy Ceph configuration file to
``/etc/kolla/config/gnocchi/<ceph_gnocchi_conf>``
* Copy Ceph keyring to
``/etc/kolla/config/gnocchi/ceph.<ceph_gnocchi_keyring>``
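For example, to point Gnocchi at a cluster other than the default
``ceph``, a sketch using the ``gnocchi_ceph_cluster`` variable introduced
by this change (the cluster name ``rbd2`` is illustrative):

.. code-block:: yaml

   gnocchi_backend_storage: "ceph"
   gnocchi_ceph_cluster: "rbd2"
   # expects /etc/kolla/config/gnocchi/rbd2.conf and
   # /etc/kolla/config/gnocchi/rbd2.<ceph_gnocchi_keyring>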
Manila
------
@ -225,32 +360,105 @@ Manila
CephFS can be used as a storage backend for Manila shares. Configuring Manila
for Ceph includes the following steps:
#. Enable Manila Ceph backend in ``globals.yml``:
* Enable Manila Ceph backend in ``globals.yml``:
.. code-block:: yaml
.. code-block:: yaml
enable_manila_backend_cephfs_native: "yes"
enable_manila_backend_cephfs_native: "yes"
#. Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
* ``ceph_manila_keyring`` (default: ``ceph.client.manila.keyring``)
* ``ceph_manila_user`` (default: ``manila``)
* ``ceph_manila_keyring`` (default: ``client.manila.keyring``)
* ``ceph_manila_user`` (default: ``manila``)
.. note::
.. note::
Required Ceph identity caps for manila user are documented in
:manila-doc:`CephFS Native driver <admin/cephfs_driver.html#authorizing-the-driver-to-communicate-with-ceph>`.
Required Ceph identity caps for manila user are documented in
:manila-doc:`CephFS Native driver <admin/cephfs_driver.html#authorizing-the-driver-to-communicate-with-ceph>`.
#. Copy Ceph configuration file to ``/etc/kolla/config/manila/ceph.conf``
#. Copy Ceph keyring to ``/etc/kolla/config/manila/<ceph_manila_keyring>``
* Copy Ceph configuration file to ``/etc/kolla/config/manila/ceph.conf``
* Copy Ceph keyring to ``/etc/kolla/config/manila/ceph.<ceph_manila_keyring>``
#. If using multiple filesystems (Ceph Pacific+), set
``manila_cephfs_filesystem_name`` in ``/etc/kolla/globals.yml`` to the
name of the Ceph filesystem Manila should use.
By default, Manila will use the first filesystem returned by
the ``ceph fs volume ls`` command.
To configure multiple Ceph backends with Manila, which is useful when
using availability zones:
#. Set up Manila in the usual way
* Copy the Ceph configuration files into ``/etc/kolla/config/manila/`` using
a different name for each cluster
``/etc/kolla/config/manila/ceph.conf``
.. path /etc/kolla/config/manila/ceph.conf
.. code-block:: ini
[global]
fsid = 1d89fec3-325a-4963-a950-c4afedd37fe3
mon_initial_members = ceph-0
mon_host = 192.168.0.56
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
``/etc/kolla/config/manila/rbd2.conf``
.. path /etc/kolla/config/manila/rbd2.conf
.. code-block:: ini
[global]
fsid = dbfea068-89ca-4d04-bba0-1b8a56c3abc8
mon_initial_members = ceph-0
mon_host = 192.10.0.100
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
* Declare Ceph backends in ``globals.yml``
.. code-block:: yaml
manila_ceph_backends:
- name: "cephfsnative1"
share_name: "CEPHFS1"
driver: "cephfsnative"
cluster: "ceph"
enabled: "{{ enable_manila_backend_cephfs_native | bool }}"
protocols:
- "CEPHFS"
- name: "cephfsnative2"
share_name: "CEPHFS2"
driver: "cephfsnative"
cluster: "rbd2"
enabled: "{{ enable_manila_backend_cephfs_native | bool }}"
protocols:
- "CEPHFS"
- name: "cephfsnfs1"
share_name: "CEPHFSNFS1"
driver: "cephfsnfs"
cluster: "ceph1"
enabled: "{{ enable_manila_backend_cephfs_nfs | bool }}"
protocols:
- "NFS"
- "CIFS"
- name: "cephfsnfs2"
share_name: "CEPHFSNFS2"
driver: "cephfsnfs"
cluster: "rbd2"
enabled: "{{ enable_manila_backend_cephfs_nfs | bool }}"
protocols:
- "NFS"
- "CIFS"
* Copy Ceph keyring files for all Ceph backends:
* ``/etc/kolla/config/manila/manila-share/ceph.<ceph_manila_keyring>``
* ``/etc/kolla/config/manila/manila-share/rbd2.<ceph_manila_keyring>``
* If using multiple filesystems (Ceph Pacific+), set
``manila_cephfs_filesystem_name`` in ``/etc/kolla/globals.yml`` to the
name of the Ceph filesystem Manila should use.
By default, Manila will use the first filesystem returned by
the ``ceph fs volume ls`` command.
* Set up Manila in the usual way
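If you need to pin the filesystem, a sketch of the relevant
``globals.yml`` settings (the filesystem name is illustrative):

.. code-block:: yaml

   enable_manila_backend_cephfs_native: "yes"
   manila_cephfs_filesystem_name: "cephfs"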
For more details on the rest of the Manila setup, such as creating the share
type ``default_share_type``, please see :doc:`Manila in Kolla <manila-guide>`.
@ -335,6 +543,6 @@ When configuring Zun with Cinder volumes, kolla-ansible installs some
Ceph client packages on zun-compute hosts. You can set the version
of the Ceph packages installed by:
#. Configuring Ceph version details in ``/etc/kolla/globals.yml``:
* Configuring Ceph version details in ``/etc/kolla/globals.yml``:
* ``ceph_version`` (default: ``pacific``)
* ``ceph_version`` (default: ``pacific``)
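For example, keeping the default explicitly in ``globals.yml``:

.. code-block:: yaml

   ceph_version: "pacific"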

View File

@ -451,27 +451,27 @@ workaround_ansible_issue_8743: yes
#external_ceph_cephx_enabled: "yes"
# Glance
#ceph_glance_keyring: "ceph.client.glance.keyring"
#ceph_glance_user: "glance"
#ceph_glance_keyring: "client.{{ ceph_glance_user }}.keyring"
#ceph_glance_pool_name: "images"
# Cinder
#ceph_cinder_keyring: "ceph.client.cinder.keyring"
#ceph_cinder_user: "cinder"
#ceph_cinder_keyring: "client.{{ ceph_cinder_user }}.keyring"
#ceph_cinder_pool_name: "volumes"
#ceph_cinder_backup_keyring: "ceph.client.cinder-backup.keyring"
#ceph_cinder_backup_user: "cinder-backup"
#ceph_cinder_backup_keyring: "client.{{ ceph_cinder_backup_user }}.keyring"
#ceph_cinder_backup_pool_name: "backups"
# Nova
#ceph_nova_keyring: "{{ ceph_cinder_keyring }}"
#ceph_nova_user: "nova"
#ceph_nova_pool_name: "vms"
# Gnocchi
#ceph_gnocchi_keyring: "ceph.client.gnocchi.keyring"
#ceph_gnocchi_user: "gnocchi"
#ceph_gnocchi_keyring: "client.{{ ceph_gnocchi_user }}.keyring"
#ceph_gnocchi_pool_name: "gnocchi"
# Manila
#ceph_manila_keyring: "ceph.client.manila.keyring"
#ceph_manila_user: "manila"
#ceph_manila_keyring: "client.{{ ceph_manila_user }}.keyring"
#############################
# Keystone - Identity Options

View File

@ -0,0 +1,44 @@
---
features:
- |
The Glance, Cinder and Manila services now support
configuring multiple Ceph cluster backends.
Nova and Gnocchi can also be pointed at a different
Ceph cluster - for Gnocchi this is possible at the
service level, for Nova at the host level. See the
`external Ceph guide <https://docs.openstack.org/kolla-ansible/latest/reference/storage/external-ceph-guide.html>`__
for details on how to set up multiple Ceph backends.
upgrade:
- |
The default value for ``ceph_cinder_keyring`` has been changed
from:
"ceph.client.cinder.keyring"
to:
"client.{{ ceph_cinder_user }}.keyring"
the default value for ``ceph_cinder_backup_keyring`` has been changed
from:
"ceph.client.cinder-backup.keyring"
to:
"client.{{ ceph_cinder_backup_user }}.keyring"
the default value for ``ceph_glance_keyring`` has been changed
from:
"ceph.client.glance.keyring"
to:
"client.{{ ceph_glance_user }}.keyring"
the default value for ``ceph_manila_keyring`` has been changed
from:
"ceph.client.manila.keyring"
to:
"client.{{ ceph_manila_user }}.keyring"
and the default value for ``ceph_gnocchi_keyring`` has been changed
from:
"ceph.client.gnocchi.keyring"
to:
"client.{{ ceph_gnocchi_user }}.keyring"
Users who overrode the default values of the above
variables must update them to match the new pattern.
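For example, a deployment that previously set
"ceph.client.cinder.keyring"
explicitly should now set
"client.cinder.keyring"
because the roles prepend the cluster name (for example "ceph.")
when locating the keyring files.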