Fix linters and metadata

With the update of ansible-lint to version >=6.0.0 a lot of new
linters were added that are enabled by default. In order to comply
with the linter rules we're applying changes to the role.

With that we also update metadata to reflect the current state.

Change-Id: Idb2dd6cd4bbf815e4b32c9bfbe9a66f33e1c4b97
This commit is contained in:
Dmitriy Rabotyagov 2023-07-12 11:00:56 +02:00 committed by Dmitriy Rabotyagov
parent 22a63c5918
commit 94a58e398b
15 changed files with 130 additions and 82 deletions

View File

@ -40,7 +40,7 @@ ceph_client_package_state: "{{ package_state | default('latest') }}"
# package repositories.
ceph_pkg_source: ceph
ceph_stable_release: pacific
ceph_apt_pinned_packages: [{ package: "*", release: "ceph.com", priority: 1001 }]
ceph_apt_pinned_packages: [{package: "*", release: "ceph.com", priority: 1001}]
# Ceph Authentication
cephx: true
@ -55,14 +55,14 @@ ceph_cluster_name: ceph
# Path to local ceph.conf file
# Leave this commented to obtain a ceph.conf from one of the monitors defined in ceph_mons
#ceph_conf_file: |
# [global]
# fsid = 4037aa5f-abde-4378-9470-f73dbd6ceaba
# mon_initial_members = mon1.example.local,mon2.example.local,mon3.example.local
# mon_host = 10.16.5.40,10.16.5.41,10.16.5.42
# auth_cluster_required = cephx
# auth_service_required = cephx
# auth_client_required = cephx
# ceph_conf_file: |
# [global]
# fsid = 4037aa5f-abde-4378-9470-f73dbd6ceaba
# mon_initial_members = mon1.example.local,mon2.example.local,mon3.example.local
# mon_host = 10.16.5.40,10.16.5.41,10.16.5.42
# auth_cluster_required = cephx
# auth_service_required = cephx
# auth_client_required = cephx
# Path to local keyrings directory
# If you want to provide keyrings from existing files, because you do not have ssh access to the monitors
@ -73,7 +73,7 @@ ceph_cluster_name: ceph
# cinder.keyring
# glance.keyring
# etc..
#ceph_keyrings_dir: "/etc/openstack/ceph-keyrings"
# ceph_keyrings_dir: "/etc/openstack/ceph-keyrings"
# Ceph client usernames for glance, cinder+nova, gnocchi and object cache
glance_ceph_client: glance
@ -125,7 +125,8 @@ ceph_client_ceph_conf_overrides: "{{ ceph_conf_overrides | default({}) }}"
# CentOS repos
ceph_centos_epel_mirror: "{{ centos_epel_mirror | default('http://download.fedoraproject.org/pub/epel') }}"
ceph_centos_epel_key: "{{ centos_epel_key | default('http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-' ~ ansible_facts['distribution_major_version']) }}"
ceph_centos_epel_key: >-
{{ centos_epel_key | default('http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-' ~ ansible_facts['distribution_major_version']) }}
# Immutible object cache - caches a read-only base layer of rbd volumes
ceph_immutable_object_cache_enabled: False

View File

@ -18,4 +18,3 @@
state: restarted
with_items: "{{ ceph_client_filtered_services }}"
failed_when: false

View File

@ -17,29 +17,31 @@ galaxy_info:
author: openstack
description: Installation and configuration of the ceph client
company: OpenStack Foundation
role_name: ceph_client
namespace: openstack
license: Apache2
min_ansible_version: 2.2
min_ansible_version: "2.10"
platforms:
- name: Debian
versions:
- buster
- bullseye
- name: Ubuntu
versions:
- bionic
- focal
- jammy
- name: EL
versions:
- 8
- "9"
categories:
galaxy_tags:
- cloud
- ceph
- development
- openstack
dependencies:
- role: apt_package_pinning
apt_pinned_packages: "{{ ceph_apt_pinned_packages }}"
apt_package_pinning_file_name: "ceph_client_pin.pref"
when:
- ansible_facts['pkg_mgr'] == 'apt'
- role: apt_package_pinning
apt_pinned_packages: "{{ ceph_apt_pinned_packages }}"
apt_package_pinning_file_name: "ceph_client_pin.pref"
when:
- ansible_facts['pkg_mgr'] == 'apt'

View File

@ -17,10 +17,12 @@
group:
name: "{{ cephkeys_access_group }}"
- include_tasks: ceph_get_keyrings_from_mons.yml
- name: Including ceph_get_keyrings_from_mons tasks
include_tasks: ceph_get_keyrings_from_mons.yml
when: ceph_keyrings_dir is not defined
- include_tasks: ceph_get_keyrings_from_files.yml
- name: Including ceph_get_keyrings_from_files tasks
include_tasks: ceph_get_keyrings_from_files.yml
when: ceph_keyrings_dir is defined
- name: Add OpenStack service to cephkeys_access_group group
@ -42,7 +44,7 @@
when:
- inventory_hostname in groups.nova_compute
changed_when: false
failed_when: false
failed_when: false
register: libvirt_nova_defined
tags:
- always
@ -63,7 +65,7 @@
- always
- name: Define libvirt nova secret
command: virsh secret-define --file /tmp/nova-secret.xml
command: virsh secret-define --file /tmp/nova-secret.xml # noqa: no-changed-when
when:
- inventory_hostname in groups.nova_compute
- libvirt_nova_defined.rc is defined
@ -78,7 +80,7 @@
when:
- inventory_hostname in groups.nova_compute
changed_when: false
failed_when: false
failed_when: false
register: libvirt_nova_set
tags:
- always
@ -114,7 +116,8 @@
- ceph_extra_confs is defined
- inventory_hostname in groups[item]
- include_tasks: ceph_auth_extra.yml
- name: Including ceph_auth_extra tasks
include_tasks: ceph_auth_extra.yml
when:
- ceph_in_extra_auth_group is defined
- ceph_in_extra_auth_group | bool
@ -127,7 +130,8 @@
- inventory_hostname in groups[ceph_extra_compute_group]
- item.secret_uuid is defined
- include_tasks: ceph_auth_extra_compute.yml
- name: Including ceph_auth_extra_compute tasks
include_tasks: ceph_auth_extra_compute.yml
when:
- ceph_extra_nova_uuid is defined
- ceph_extra_nova_uuid | bool

View File

@ -15,8 +15,12 @@
- name: Create keyring files for openstack clients from extra cluster(s)
shell: "ceph auth get client.{{ item.client_name }} --cluster {{ ceph_cluster_name }} >/dev/null && ceph auth get-or-create client.{{ item.client_name }} --cluster {{ ceph_cluster_name }} > /etc/ceph/{{ ceph_cluster_name }}.client.{{ item.client_name }}.keyring.tmp"
shell: >-
ceph auth get client.{{ item.client_name }} --cluster {{ ceph_cluster_name }} >/dev/null &&
ceph auth get-or-create client.{{ item.client_name }} --cluster {{ ceph_cluster_name }}
> /etc/ceph/{{ ceph_cluster_name }}.client.{{ item.client_name }}.keyring.tmp
with_items: "{{ ceph_extra_confs }}"
changed_when: false
delegate_to: "{{ item.mon_host }}"
when:
- item.client_name is defined
@ -36,7 +40,7 @@
file:
path: "{{ item.keyring_src }}"
state: file
mode: 0600
mode: "0600"
delegate_to: localhost
with_items: "{{ ceph_extra_confs }}"
when:
@ -57,9 +61,9 @@
copy:
src: "{{ item.keyring_src }}"
dest: "{{ item.keyring_dest }}"
owner: root
group: "{{ cephkeys_access_group }}"
mode: 0640
owner: root
group: "{{ cephkeys_access_group }}"
mode: "0640"
notify:
- Restart os services
with_items: "{{ ceph_extra_confs }}"

View File

@ -15,15 +15,20 @@
- name: Create key files for nova_compute on extra cluster(s)
shell: "ceph auth get-key client.{{ item.client_name }} --cluster {{ ceph_cluster_name }} > /etc/ceph/{{ ceph_cluster_name }}.client.{{ item.client_name }}.key.tmp"
shell: >-
ceph auth get-key client.{{ item.client_name }} --cluster {{ ceph_cluster_name }}
> /etc/ceph/{{ ceph_cluster_name }}.client.{{ item.client_name }}.key.tmp
with_items: "{{ ceph_extra_confs }}"
changed_when: false
delegate_to: "{{ item.mon_host }}"
when:
- item.client_name is defined
- item.mon_host is defined
- name: Get extra key files
command: "scp {{ item.mon_host }}:/etc/ceph/{{ ceph_cluster_name }}.client.{{ item.client_name }}.key.tmp /tmp/{{ item.mon_host }}{{ item.client_name }}.key.tmp"
command: >-
scp {{ item.mon_host }}:/etc/ceph/{{ ceph_cluster_name }}.client.{{ item.client_name }}.key.tmp
/tmp/{{ item.mon_host }}{{ item.client_name }}.key.tmp
changed_when: false
delegate_to: localhost
with_items: "{{ ceph_extra_confs }}"
@ -35,6 +40,7 @@
copy:
src: "/tmp/{{ item.mon_host }}{{ item.client_name }}.key.tmp"
dest: "/tmp/{{ item.mon_host }}{{ item.client_name }}.key.tmp"
mode: "0640"
with_items: "{{ ceph_extra_confs }}"
when:
- item.mon_host is defined
@ -44,6 +50,7 @@
copy:
src: "{{ item.keyring_src }}"
dest: "/tmp/{{ item.secret_uuid }}{{ item.client_name }}.key.tmp"
mode: "0640"
with_items: "{{ ceph_extra_confs }}"
when:
- item.keyring_src is defined
@ -74,7 +81,7 @@
- name: Check if extra secret(s) are defined in libvirt pt1
shell: "virsh secret-dumpxml {{ item.secret_uuid }} 2>&1 >/dev/null && touch /tmp/{{ item.secret_uuid }}.libvirt_secret_exists"
changed_when: false
failed_when: false
failed_when: false
with_items: "{{ ceph_extra_confs }}"
when:
- item.secret_uuid is defined
@ -84,7 +91,7 @@
- name: Check if extra secret(s) are defined in libvirt pt2
shell: "ls /tmp | grep \\.libvirt_secret_exists | awk -F'.' '{print $1}'"
changed_when: false
failed_when: false
failed_when: false
register: libvirt_secret_exists
with_items: "{{ ceph_extra_confs }}"
when:
@ -108,7 +115,7 @@
- name: Check if extra secret values are set in libvirt pt1
shell: "virsh secret-get-value {{ item.secret_uuid }} 2>&1 >/dev/null && touch /tmp/{{ item.secret_uuid }}.libvirt_secret_value_exists "
changed_when: false
failed_when: false
failed_when: false
register: libvirt_nova_set
with_items: "{{ ceph_extra_confs }}"
when:
@ -117,7 +124,7 @@
- name: Check if extra secret values are set in libvirt pt2
shell: "ls /tmp | grep \\.libvirt_secret_value_exists | awk -F'.' '{print $1}'"
changed_when: false
failed_when: false
failed_when: false
register: libvirt_secret_value_exists
with_items: "{{ ceph_extra_confs }}"
when:
@ -125,7 +132,7 @@
tags:
- always
- name: Set extra secret value(s) in libvirt from monitor host
- name: Set extra secret value(s) in libvirt from monitor host # noqa: no-changed-when
shell: "virsh secret-set-value --secret {{ item.secret_uuid }} --base64 $(cat /tmp/{{ item.mon_host }}{{ item.client_name }}.key.tmp)"
loop: "{{ ceph_extra_confs }}"
loop_control:
@ -138,8 +145,10 @@
notify:
- Restart os services
- name: Set extra secret value(s) in libvirt from keyring
shell: "virsh secret-set-value --secret {{ item.secret_uuid }} --base64 $(awk '/key = /{print $3}' /tmp/{{ item.secret_uuid }}{{ item.client_name }}.key.tmp)"
- name: Set extra secret value(s) in libvirt from keyring # noqa: no-changed-when
shell: >-
virsh secret-set-value --secret {{ item.secret_uuid }} --base64
$(awk '/key = /{print $3}' /tmp/{{ item.secret_uuid }}{{ item.client_name }}.key.tmp)
loop: "{{ ceph_extra_confs }}"
loop_control:
index_var: index

View File

@ -46,9 +46,9 @@
openstack.config_template.config_template:
content: '{{ ceph_conf }}'
dest: "/etc/ceph/{{ ceph_cluster_name }}.conf"
owner: root
group: root
mode: 0644
owner: root
group: root
mode: "0644"
config_type: ini
config_overrides: '{{ ceph_client_ceph_conf_overrides }}'
notify:
@ -62,7 +62,8 @@
- inventory_hostname in groups[item]
with_items: "{{ ceph_extra_config_groups }}"
- include_tasks: ceph_config_extra.yml
- name: Including ceph_config_extra tasks
include_tasks: ceph_config_extra.yml
when:
- ceph_in_extra_config_group is defined
- ceph_in_extra_config_group | bool

View File

@ -27,9 +27,9 @@
copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
owner: root
group: root
mode: 0644
owner: root
group: root
mode: "0644"
notify:
- Restart os services
with_items: "{{ ceph_extra_confs }}"
@ -43,7 +43,8 @@
section: "client.{{ item.client_name }}"
option: keyring
value: "{{ item.keyring_dest }}"
with_items: "{{ ceph_extra_confs }}"
mode: "0640"
with_items: "{{ ceph_extra_confs }}"
when:
- item.src is defined
- item.dest is defined

View File

@ -36,22 +36,23 @@
- name: From files | Provision ceph client keyrings
copy:
dest: "/etc/ceph/{{ ceph_cluster_name }}.client.{{ item['name'] }}.keyring"
content: "{{ ceph_client_keys[item['name']] }}\n" # noqa 206
owner: "{{ client['owner'] | default('root') }}"
group: "{{ client['group'] | default(cephkeys_access_group) }}"
content: |
{{ ceph_client_keys[item['name']] }}
owner: "{{ client['owner'] | default('root') }}"
group: "{{ client['group'] | default(cephkeys_access_group) }}"
# ideally the permission will be: 0600 and the owner/group will be either
# glance , nova or cinder. For keys that require access by different users
# (the cinder one) we should probably create a group 'cephkeys' and add
# nova/cinder to it.
# If I'm correct, the use case for multiple users is on the computre nodes,
# access needed by users libvirt-qemu and nova
mode: "{{ client['mode'] | default ('0640') }}"
mode: "{{ client['mode'] | default('0640') }}"
with_items: "{{ ceph_client_filtered_clients }}"
notify:
- Restart os services
- name: From file | Retrieve nova secret
set_fact :
set_fact:
ceph_nova_secret:
stdout: "{{ (ceph_client_keys[nova_ceph_client] | regex_search('.*^\\s*key\\s*=\\s*(.*)$.*', '\\1', multiline=True))[0] }}"
when:

View File

@ -15,12 +15,14 @@
## Ceph client keyrings
#TODO: also be able to create users, keys and pools on ceph
# TODO: also be able to create users, keys and pools on ceph
- name: Retrieve keyrings for openstack clients
# the first get makes sure the client exists, so the second only runs when it
# exists, the trick is the different output of both, the second has the right
# output to put in a keyring; ceph admin should have already created the user
shell: "ceph auth get client.{{ item['name'] }} --cluster {{ ceph_cluster_name }} >/dev/null && ceph auth get-or-create client.{{ item['name'] }} --cluster {{ ceph_cluster_name }}"
shell: >-
ceph auth get client.{{ item['name'] }} --cluster {{ ceph_cluster_name }} >/dev/null &&
ceph auth get-or-create client.{{ item['name'] }} --cluster {{ ceph_cluster_name }}
with_items: "{{ ceph_client_filtered_clients }}"
changed_when: false
delegate_to: '{{ ceph_mon_host }}'
@ -35,19 +37,19 @@
# TODO: do we really need a template for this? what's the added value compare to
# ceph get-or-create ... ... -o file?
template:
src: ceph.client.keyring.j2
dest: "/etc/ceph/{{ ceph_cluster_name }}.client.{{ item.item['name'] }}.keyring"
backup: true
owner: "{{ item.item.owner | default('root') }}"
src: ceph.client.keyring.j2
dest: "/etc/ceph/{{ ceph_cluster_name }}.client.{{ item.item['name'] }}.keyring"
backup: true
owner: "{{ item.item.owner | default('root') }}"
# TODO
group: "{{ item.item.group | default(cephkeys_access_group) }}"
group: "{{ item.item.group | default(cephkeys_access_group) }}"
# ideally the permission will be: 0600 and the owner/group will be either
# glance , nova or cinder. For keys that require access by different users
# (the cinder one) we should probably create a group 'cephkeys' and add
# nova/cinder to it.
# If I'm correct, the use case for multiple users is on the computre nodes,
# access needed by users libvirt-qemu and nova
mode: "{{ item.item.mode | default('0640') }}"
mode: "{{ item.item.mode | default('0640') }}"
with_items: "{{ ceph_client_keyrings.results }}"
when:
- not item is skipped
@ -64,4 +66,3 @@
register: ceph_nova_secret
tags:
- always

View File

@ -35,7 +35,16 @@
systemd_overrides:
Service:
UMask: "{{ ceph_immutable_object_cache_umask }}"
ExecStart: "{{ ['', '/usr/bin/ceph-immutable-object-cache -f --cluster ${CLUSTER} --name client.immutable-object-cache --setuser ' ~ ceph_immutable_object_cache_owner ~ ' --setgroup ' ~ ceph_immutable_object_cache_group ] }}"
ExecStart: >-
{{
[
'',
'/usr/bin/ceph-immutable-object-cache -f --cluster ${CLUSTER} --name client.immutable-object-cache --setuser ' ~
ceph_immutable_object_cache_owner ~
' --setgroup ' ~
ceph_immutable_object_cache_group
]
}}
- name: Ensure ceph immutable object cache service is running
service:

View File

@ -29,10 +29,15 @@
src: "gpg/{{ item.id }}"
dest: "{{ item.file }}"
mode: '0644'
with_items: "{{ ceph_gpg_keys | selectattr('file','defined') | list }}"
with_items: "{{ ceph_gpg_keys | selectattr('file', 'defined') | list }}"
- name: Add ceph apt-keys
apt_key: "{{ key }}"
apt_key:
data: "{{ key['data'] | default(omit) }}"
file: "{{ key['file'] | default(omit) }}"
id: "{{ key['id'] | default(omit) }}"
state: "{{ key['state'] | default('present') }}"
url: "{{ key['url'] | default(omit) }}"
with_items: "{{ ceph_gpg_keys }}"
loop_control:
loop_var: key

View File

@ -15,8 +15,9 @@
- name: Download EPEL gpg keys
get_url:
url: "{{ ceph_centos_epel_key }}"
dest: /etc/pki/rpm-gpg
url: "{{ ceph_centos_epel_key }}"
dest: /etc/pki/rpm-gpg
mode: "0640"
register: _get_yum_keys
until: _get_yum_keys is success
retries: 5
@ -54,7 +55,10 @@
- ceph_pkg_source == 'ceph'
- name: Install Ceph gpg keys
rpm_key: "{{ key }}"
rpm_key:
key: "{{ key['key'] }}"
fingerprint: "{{ key['fingerprint'] | default(omit) }}"
state: "{{ key['state'] | default('present') }}"
with_items: "{{ ceph_gpg_keys }}"
loop_control:
loop_var: key

View File

@ -41,38 +41,45 @@
when:
- ceph_pkg_source not in ['ceph', 'distro']
- include_tasks: "ceph_preinstall_{{ ansible_facts['pkg_mgr'] }}.yml"
- name: Including ceph_preinstall tasks
include_tasks: "ceph_preinstall_{{ ansible_facts['pkg_mgr'] }}.yml"
when:
- ceph_pkg_source != 'distro'
tags:
- ceph-install
- include_tasks: ceph_install.yml
- name: Including ceph_install tasks
include_tasks: ceph_install.yml
tags:
- ceph-install
- include_tasks: ceph_install_python_libs.yml
- name: Including ceph_install_python_libs tasks
include_tasks: ceph_install_python_libs.yml
when:
- openstack_service_venv_bin | length > 0
tags:
- ceph-install
- include_tasks: ceph_get_mon_host.yml
- name: Including ceph_get_mon_host tasks
include_tasks: ceph_get_mon_host.yml
when: ceph_conf_file is not defined or ceph_keyrings_dir is not defined
tags:
- ceph-config
- include_tasks: ceph_config.yml
- name: Including ceph_config tasks
include_tasks: ceph_config.yml
tags:
- ceph-config
- include_tasks: ceph_auth.yml
- name: Including ceph_auth tasks
include_tasks: ceph_auth.yml
when:
- cephx | bool
tags:
- ceph-config
- include_tasks: ceph_immutable_object_cache.yml
- name: Including ceph_immutable_object_cache tasks
include_tasks: ceph_immutable_object_cache.yml
when:
- ceph_immutable_object_cache_enabled | bool
tags:

View File

@ -23,17 +23,17 @@ ceph_components:
- name: '{{ glance_ceph_client }}'
service: '{{ ceph_glance_service_names }}'
- component: cinder_volume
package: "{{ [ 'ceph-common' ] + python_ceph_packages }}"
package: "{{ ['ceph-common'] + python_ceph_packages }}"
client:
- name: '{{ cinder_ceph_client }}'
service: '{{ ceph_cinder_service_names }}'
- component: cinder_backup
package: "{{ [ 'ceph-common' ] + python_ceph_packages }}"
package: "{{ ['ceph-common'] + python_ceph_packages }}"
client:
- name: '{{ cinder_backup_ceph_client }}'
service: '{{ ceph_cinder_service_names }}'
- component: nova_compute
package: "{{ (libvirt_packages + [ 'ceph-common' ] + ceph_immutable_object_cache_packages + python_ceph_packages) | select }}"
package: "{{ (libvirt_packages + ['ceph-common'] + ceph_immutable_object_cache_packages + python_ceph_packages) | select }}"
client:
- name: "{{ nova_ceph_client }}"
- name: "{{ immutable_object_cache_client }}"
@ -42,7 +42,7 @@ ceph_components:
enabled: "{{ ceph_immutable_object_cache_enabled }}"
service: '{{ ceph_nova_service_names }}'
- component: manila_share
package: "{{ ['ceph-common'] + python_ceph_packages }}"
package: "{{ ['ceph-common'] + python_ceph_packages }}"
client:
- name: "{{ manila_ceph_client }}"
service: "{{ ceph_manila_service_names }}"