Merge "Uplift Ceph charts to the Mimic release"

This commit is contained in:
Zuul 2019-01-05 19:39:57 +00:00 committed by Gerrit Code Review
commit 0770465962
15 changed files with 61 additions and 42 deletions

View File

@ -25,13 +25,13 @@ release_group: null
images:
pull_policy: IfNotPresent
tags:
ceph_bootstrap: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
ceph_bootstrap: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
ceph_mds: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
ceph_mgr: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
ceph_mds: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
ceph_mgr: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
ceph_rbd_pool: 'docker.io/port/ceph-config-helper:v1.10.3'
dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
image_repo_sync: docker.io/docker:17.07.0
image_repo_sync: 'docker.io/docker:17.07.0'
local_registry:
active: false
exclude:
@ -357,8 +357,8 @@ bootstrap:
ceph -s
function ensure_pool () {
ceph osd pool stats $1 || ceph osd pool create $1 $2
local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous" | xargs echo)
if [[ ${test_luminous} -gt 0 ]]; then
local test_version=$(ceph tell osd.* version | egrep -c "mimic|luminous" | xargs echo)
if [[ ${test_version} -gt 0 ]]; then
ceph osd pool application enable $1 $3
fi
}

View File

@ -24,12 +24,12 @@ deployment:
images:
pull_policy: IfNotPresent
tags:
ceph_bootstrap: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
ceph_bootstrap: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
ceph_mon: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
ceph_mon: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
ceph_mon_check: 'docker.io/port/ceph-config-helper:v1.10.3'
dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
image_repo_sync: docker.io/docker:17.07.0
image_repo_sync: 'docker.io/docker:17.07.0'
local_registry:
active: false
exclude:
@ -228,8 +228,8 @@ bootstrap:
ceph -s
function ensure_pool () {
ceph osd pool stats $1 || ceph osd pool create $1 $2
local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous" | xargs echo)
if [[ ${test_luminous} -gt 0 ]]; then
local test_version=$(ceph tell osd.* version | egrep -c "mimic|luminous" | xargs echo)
if [[ ${test_version} -gt 0 ]]; then
ceph osd pool application enable $1 $3
fi
}

View File

@ -29,8 +29,8 @@ eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python -c 'import
eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
if [[ $(ceph -v | egrep -q "12.2|luminous"; echo $?) -ne 0 ]]; then
echo "ERROR- need Luminous release"
if [[ $(ceph -v | egrep -q "mimic|luminous"; echo $?) -ne 0 ]]; then
echo "ERROR- need Luminous/Mimic release"
exit 1
fi

View File

@ -20,11 +20,11 @@
images:
pull_policy: IfNotPresent
tags:
ceph_osd: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
ceph_bootstrap: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
ceph_osd: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
ceph_bootstrap: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
image_repo_sync: docker.io/docker:17.07.0
image_repo_sync: 'docker.io/docker:17.07.0'
local_registry:
active: false
exclude:
@ -124,7 +124,7 @@ conf:
# match the failure domain used on your CRUSH rules for pools. For example with a crush rule of
# rack_replicated_rule you would specify "rack" as the `failure_domain` to use.
# `failure_domain`: Set the CRUSH bucket type for your OSD to reside in. See the supported CRUSH configuration
# as listed here: Supported CRUSH configuration is listed here: http://docs.ceph.com/docs/luminous/rados/operations/crush-map/
# as listed here: Supported CRUSH configuration is listed here: http://docs.ceph.com/docs/mimic/rados/operations/crush-map/
# `failure_domain_by_hostname`: Specify the portion of the hostname to use for your failure domain bucket name.
# `failure_domain_name`: Manually name the failure domain bucket name. This configuration option should only be used
# when using host based overrides.
@ -134,6 +134,7 @@ conf:
# NOTE(supamatt): Add a configurable option to reset the past interval time of a PG.
# This solves an open bug within Ceph Luminous releases. https://tracker.ceph.com/issues/21142
# Not required for Mimic releases.
osd_pg_interval_fix: "false"
# NOTE(portdirect): for homogeneous clusters the `osd` key can be used to
@ -210,8 +211,8 @@ bootstrap:
ceph -s
function ensure_pool () {
ceph osd pool stats $1 || ceph osd pool create $1 $2
local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous" | xargs echo)
if [[ ${test_luminous} -gt 0 ]]; then
local test_version=$(ceph tell osd.* version | egrep -c "mimic|luminous" | xargs echo)
if [[ ${test_version} -gt 0 ]]; then
ceph osd pool application enable $1 $3
fi
}

View File

@ -28,12 +28,12 @@ release_group: null
images:
pull_policy: IfNotPresent
tags:
ceph_bootstrap: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
ceph_bootstrap: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
ceph_cephfs_provisioner: 'quay.io/external_storage/cephfs-provisioner:v1.1.0-k8s1.10'
ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
ceph_rbd_provisioner: 'quay.io/external_storage/rbd-provisioner:v1.1.0-k8s1.10'
dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
image_repo_sync: docker.io/docker:17.07.0
image_repo_sync: 'docker.io/docker:17.07.0'
local_registry:
active: false
exclude:
@ -161,8 +161,8 @@ bootstrap:
ceph -s
function ensure_pool () {
ceph osd pool stats $1 || ceph osd pool create $1 $2
local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous" | xargs echo)
if [[ ${test_luminous} -gt 0 ]]; then
local test_version=$(ceph tell osd.* version | egrep -c "mimic|luminous" | xargs echo)
if [[ ${test_version} -gt 0 ]]; then
ceph osd pool application enable $1 $3
fi
}

View File

@ -23,7 +23,7 @@ cp -va /tmp/ceph.conf /etc/ceph/ceph.conf
cat >> /etc/ceph/ceph.conf <<EOF
[client.rgw.$(hostname -s)]
rgw_frontends = "civetweb port=${RGW_CIVETWEB_PORT}"
rgw_frontends = "beast port=${RGW_FRONTEND_PORT}"
rgw_keystone_url = "${KEYSTONE_URL}"
rgw_keystone_admin_user = "${OS_USERNAME}"
rgw_keystone_admin_password = "${OS_PASSWORD}"

View File

@ -54,7 +54,7 @@ if [ ! -e "${RGW_KEYRING}" ]; then
chmod 0600 "${RGW_KEYRING}"
fi
RGW_FRONTENDS="civetweb port=${RGW_CIVETWEB_PORT}"
RGW_FRONTENDS="beast port=${RGW_FRONTEND_PORT}"
/usr/bin/radosgw \
--cluster "${CLUSTER}" \

View File

@ -83,7 +83,7 @@ spec:
{{- end }}
- name: KEYSTONE_URL
value: {{ tuple "identity" "internal" "api" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" | trimSuffix .Values.endpoints.identity.path.default | quote }}
- name: RGW_CIVETWEB_PORT
- name: RGW_FRONTEND_PORT
value: "{{ tuple "object_store" "internal" "api" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}"
command:
- /tmp/rgw-init-keystone.sh
@ -106,7 +106,7 @@ spec:
env:
- name: CLUSTER
value: "ceph"
- name: RGW_CIVETWEB_PORT
- name: RGW_FRONTEND_PORT
value: "{{ tuple "object_store" "internal" "api" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}"
command:
- /tmp/rgw-start.sh

View File

@ -27,9 +27,9 @@ images:
pull_policy: IfNotPresent
tags:
ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
ceph_rgw: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
ceph_rgw: 'docker.io/ceph/daemon:master-0b3eb04-mimic-centos-7-x86_64'
dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
image_repo_sync: docker.io/docker:17.07.0
image_repo_sync: 'docker.io/docker:17.07.0'
rgw_s3_admin: 'docker.io/port/ceph-config-helper:v1.10.3'
ks_endpoints: 'docker.io/openstackhelm/heat:newton'
ks_service: 'docker.io/openstackhelm/heat:newton'
@ -62,11 +62,11 @@ pod:
enabled: false
rgw:
requests:
memory: "5Mi"
memory: "128Mi"
cpu: "250m"
limits:
memory: "50Mi"
cpu: "500m"
memory: "512Mi"
cpu: "1000m"
jobs:
ceph-rgw-storage-init:
requests:
@ -294,6 +294,8 @@ conf:
rgw_thread_pool_size: 512
rgw_num_rados_handles: 4
rgw_override_bucket_index_max_shards: 8
#NOTE (supamatt): Unfortunately we do not conform to S3 compliant names with some of our charts
rgw_relaxed_s3_bucket_names: true
rgw_s3:
enabled: false
admin_caps: "users=*;buckets=*;zone=*"
@ -364,8 +366,8 @@ bootstrap:
ceph -s
function ensure_pool () {
ceph osd pool stats $1 || ceph osd pool create $1 $2
local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous")
if [[ ${test_luminous} -gt 0 ]]; then
local test_version=$(ceph tell osd.* version | egrep -c "mimic|luminous")
if [[ ${test_version} -gt 0 ]]; then
ceph osd pool application enable $1 $3
fi
}

View File

@ -28,8 +28,8 @@ set -ex
ceph -s
function ensure_pool () {
ceph osd pool stats $1 || ceph osd pool create $1 $2
local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous" | xargs echo)
if [[ ${test_luminous} -gt 0 ]]; then
local test_version=$(ceph tell osd.* version | egrep -c "mimic|luminous" | xargs echo)
if [[ ${test_version} -gt 0 ]]; then
ceph osd pool application enable $1 $3
fi
}

View File

@ -28,7 +28,7 @@ sudo sed -i '/^::1/c\::1 localhost6 localhost6.localdomain6' /etc/hosts
# NOTE: Install required packages on host
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv 460F3994
RELEASE_NAME=$(grep 'CODENAME' /etc/lsb-release | awk -F= '{print $2}')
sudo add-apt-repository "deb https://download.ceph.com/debian-luminous/ ${RELEASE_NAME} main"
sudo add-apt-repository "deb https://download.ceph.com/debian-mimic/ ${RELEASE_NAME} main"
sudo -E apt-get update
sudo -E apt-get install -y \
docker.io \

View File

@ -23,7 +23,7 @@
- name: ubuntu | ensure community ceph repository exists
when: ansible_distribution == 'Ubuntu'
apt_repository:
repo: "deb https://download.ceph.com/debian-luminous/ {{ ansible_lsb.codename }} main"
repo: "deb https://download.ceph.com/debian-mimic/ {{ ansible_lsb.codename }} main"
state: present
update_cache: yes
@ -33,7 +33,7 @@
name: ceph
description: "Ceph community packages for Redhat/Centos"
gpgkey: "https://download.ceph.com/keys/release.asc"
baseurl: "https://download.ceph.com/rpm-luminous/el7/$basearch"
baseurl: "https://download.ceph.com/rpm-mimic/el7/$basearch"
gpgcheck: yes
state: present
@ -73,6 +73,14 @@
- ceph-common
- rbd-nbd
- name: ubuntu | uninstall packages
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
apt:
name: "{{item}}"
state: absent
with_items:
- ceph
- name: centos | installing packages
when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux'
yum:
@ -82,6 +90,14 @@
- ceph-common
- rbd-nbd
- name: centos | installing packages
when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux'
yum:
name: "{{item}}"
state: absent
with_items:
- ceph
- name: blacklist kernel RBD driver module
when: kubelet.pv_support_ceph
copy:

View File

@ -2,7 +2,7 @@ FROM docker.io/ubuntu:xenial
MAINTAINER pete.birley@att.com
ARG LIBVIRT_VERSION=ocata
ARG CEPH_RELEASE=luminous
ARG CEPH_RELEASE=mimic
ARG PROJECT=nova
ARG UID=42424
ARG GID=42424

View File

@ -18,7 +18,7 @@ SHELL := /bin/bash
LIBVIRT_VERSION ?= 1.3.1-1ubuntu10.24
DISTRO ?= ubuntu
DISTRO_RELEASE ?= xenial
CEPH_RELEASE ?= luminous
CEPH_RELEASE ?= mimic
DOCKER_REGISTRY ?= docker.io
IMAGE_NAME ?= libvirt

View File

@ -33,7 +33,7 @@ repo run:
LIBVIRT_VERSION=1.3.1-1ubuntu10.24
DISTRO=ubuntu
DISTRO_RELEASE=xenial
CEPH_RELEASE=luminous
CEPH_RELEASE=mimic
sudo docker build \
--network=host \