[CEPH] OSH: use loopback devices for ceph osds.

- Use loopback devices for the Ceph OSDs, since support for
  directory-backed OSDs is going to be deprecated.

- Move the Ceph OSDs from filestore to bluestore.
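
In short, each node now backs its OSDs with sparse, file-backed loop
devices instead of host directories; a minimal sketch of the scheme
(sizes and device names match the gate scripts in the diff below):

    # create a sparse backing file and attach it as a block device
    sudo truncate -s 10G /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
    sudo losetup /dev/loop0 /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img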

Change-Id: Ia95c9ceb81f7d253dd582a2e753a6ed8fe60a04d
Chinasubbareddy Mallavarapu 2020-05-14 09:24:22 -05:00
parent 84b27c895d
commit a385c18176
7 changed files with 70 additions and 18 deletions


@@ -54,6 +54,11 @@ containers before removing the directories used on the host by pods.
 sudo rm -rf /var/lib/nova/*
 sudo rm -rf /var/lib/libvirt/*
 sudo rm -rf /etc/libvirt/qemu/*
+# NOTE(chinasubbareddy): clean up LVM volume groups left behind by disk-backed ceph osd deployments
+for VG in $(sudo vgs | grep -v VG | grep -i ceph | awk '{print $1}'); do
+  echo "$VG"
+  sudo vgremove -y "$VG"
+done
 # NOTE(portdirect): Clean up mounts left behind by kubernetes pods
 sudo findmnt --raw | awk '/^\/var\/lib\/kubelet\/pods/ { print $1 }' | xargs -r -L1 -P16 sudo umount -f -l
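
For context on why this cleanup is needed: with bluestore, ceph-volume
provisions each OSD as LVM logical volumes on the backing devices, so
tearing down a deployment must also remove the volume groups it created.
An illustrative check (not part of the change) for leftovers:

    # list any ceph volume groups still present on the host
    sudo vgs | grep -i ceph || echo "no ceph volume groups found"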


@@ -104,11 +104,20 @@ Alternatively, this step can be performed by running the script directly:
     OSH_DEPLOY_MULTINODE=True ./tools/deployment/component/common/ingress.sh
 
+Create loopback devices for Ceph
+--------------------------------
+
+Create two loopback devices for Ceph: one to serve as the OSD data disk,
+and the other to hold the block DB and block WAL.
+
+.. code-block:: shell
+
+  ansible all -i /opt/openstack-helm-infra/tools/gate/devel/multinode-inventory.yaml -m shell -s -a "/opt/openstack-helm/tools/deployment/common/setup-ceph-loopback-device.sh"
+
 Deploy Ceph
 -----------
 
-The script below configures Ceph to use filesystem directory-based storage.
+The script below configures Ceph to use the loopback devices created in
+the previous step as the backing devices for the OSDs.
 To configure a custom block device-based backend, please refer
 to the ``ceph-osd`` `values.yaml <https://github.com/openstack/openstack-helm/blob/master/ceph-osd/values.yaml>`_.
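
The same override shape works for real disks; a hypothetical example,
assuming a spare /dev/sdb for OSD data and /dev/sdc for the DB/WAL
(device names are illustrative, not defaults):

    # hypothetical override file; adjust devices to the host's spare disks
    tee /tmp/ceph-osd-overrides.yaml <<EOF
    conf:
      storage:
        osd:
          - data:
              type: bluestore
              location: /dev/sdb
            block_db:
              location: /dev/sdc
              size: "5GB"
            block_wal:
              location: /dev/sdc
              size: "2GB"
    EOF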


@@ -0,0 +1,13 @@
+#!/bin/bash
+set -xe
+sudo df -lh
+sudo lsblk
+sudo mkdir -p /var/lib/openstack-helm/ceph
+# Create sparse backing files: 10G for OSD data, 8G for the block DB and WAL
+sudo truncate -s 10G /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
+sudo truncate -s 8G /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
+sudo losetup /dev/loop0 /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
+sudo losetup /dev/loop1 /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
+# Verify the devices
+sudo df -lh
+sudo lsblk
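
When iterating on this script locally (outside a fresh gate node), loop
devices from a previous run may still be attached; they can be listed
and detached with losetup before re-running:

    sudo losetup -a                         # list attached loop devices
    sudo losetup -d /dev/loop0 /dev/loop1   # detach both devices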


@@ -154,17 +154,21 @@ conf:
   storage:
     osd:
       - data:
-          type: directory
-          location: /var/lib/openstack-helm/ceph/osd/osd-one
-        journal:
-          type: directory
-          location: /var/lib/openstack-helm/ceph/osd/journal-one
+          type: bluestore
+          location: /dev/loop0
+        block_db:
+          location: /dev/loop1
+          size: "5GB"
+        block_wal:
+          location: /dev/loop1
+          size: "2GB"
 pod:
   replicas:
     mds: 1
     mgr: 1
     rgw: 1
+deploy:
+  tool: "ceph-volume"
 EOF
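
Note the new deploy.tool override: the bluestore OSDs are provisioned
with ceph-volume, which carves LVM volumes out of the loop devices, and
the 5GB block DB plus 2GB WAL fit within the 8G image behind /dev/loop1.
Once the chart is deployed, the resulting layout can be inspected from a
node (or ceph-osd pod) where the tools are available, e.g.:

    sudo ceph-volume lvm list   # show each OSD's data/db/wal volumes
    sudo lvs                    # underlying logical volumes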


@@ -14,6 +14,18 @@
 set -xe
 
+# Create loopback devices for the ceph osds.
+sudo df -lh
+sudo lsblk
+sudo mkdir -p /var/lib/openstack-helm/ceph
+sudo truncate -s 10G /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
+sudo truncate -s 8G /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
+sudo losetup /dev/loop0 /var/lib/openstack-helm/ceph/ceph-osd-data-loopbackfile.img
+sudo losetup /dev/loop1 /var/lib/openstack-helm/ceph/ceph-osd-db-wal-loopbackfile.img
+# Verify the devices
+sudo df -lh
+sudo lsblk
+
 #NOTE: Lint and package chart
 export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
 for CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do
@@ -143,13 +155,16 @@ conf:
       replication: 1
       percent_total_data: 34.8
   storage:
-    osd:
+    osd:
       - data:
-          type: directory
-          location: /var/lib/openstack-helm/ceph/osd/osd-one
-        journal:
-          type: directory
-          location: /var/lib/openstack-helm/ceph/osd/journal-one
+          type: bluestore
+          location: /dev/loop0
+        block_db:
+          location: /dev/loop1
+          size: "5GB"
+        block_wal:
+          location: /dev/loop1
+          size: "2GB"
 pod:
   replicas:


@@ -56,11 +56,14 @@ conf:
   storage:
     osd:
       - data:
-          type: directory
-          location: /var/lib/openstack-helm/ceph/osd/osd-one
-        journal:
-          type: directory
-          location: /var/lib/openstack-helm/ceph/osd/journal-one
+          type: bluestore
+          location: /dev/loop0
+        block_db:
+          location: /dev/loop1
+          size: "5GB"
+        block_wal:
+          location: /dev/loop1
+          size: "2GB"
 storageclass:
   cephfs:
     provision_storage_class: false


@@ -85,6 +85,7 @@
       container_distro_version: bionic
       gate_scripts:
         - ./tools/deployment/common/install-packages.sh
+        - ./tools/deployment/common/setup-ceph-loopback-device.sh
         - ./tools/deployment/common/deploy-k8s.sh
         - ./tools/deployment/common/setup-client.sh
         - ./tools/scripts/tls/cert-manager.sh
@@ -309,6 +310,7 @@
       feature_gates: apparmor
       gate_scripts:
         - ./tools/deployment/common/install-packages.sh
+        - ./tools/deployment/common/setup-ceph-loopback-device.sh
         - ./tools/deployment/common/deploy-k8s.sh
         - ./tools/deployment/common/setup-client.sh
         - ./tools/deployment/component/ceph/ceph.sh
@@ -399,6 +401,7 @@
       feature_gates: netpol
       gate_scripts:
         - ./tools/deployment/common/install-packages.sh
+        - ./tools/deployment/common/setup-ceph-loopback-device.sh
         - ./tools/deployment/common/deploy-k8s.sh
         - ./tools/deployment/common/setup-client.sh
         - ./tools/deployment/component/ceph/ceph.sh