openstack-helm-infra/ceph-rgw/values.yaml

# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for ceph-rgw.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
deployment:
  ceph: false
  rgw_keystone_user_and_endpoints: false
release_group: null
images:
  pull_policy: IfNotPresent
  tags:
    ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
    ceph_rgw: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
    dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
    image_repo_sync: docker.io/docker:17.07.0
    rgw_s3_admin: 'docker.io/port/ceph-config-helper:v1.10.3'
    ks_endpoints: 'docker.io/openstackhelm/heat:newton'
    ks_service: 'docker.io/openstackhelm/heat:newton'
    ks_user: 'docker.io/openstackhelm/heat:newton'
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync
labels:
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  rgw:
    node_selector_key: ceph-rgw
    node_selector_value: enabled
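#NOTE: the node_selector keys/values above are only matched against labels that
# already exist on the nodes; as an illustration (not something this chart
# applies itself), nodes are typically labelled before deployment with e.g.:
#   kubectl label nodes <node-name> openstack-control-plane=enabled
#   kubectl label nodes <node-name> ceph-rgw=enabled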
pod:
  dns_policy: "ClusterFirstWithHostNet"
  replicas:
    rgw: 2
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
  resources:
    enabled: false
    rgw:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    jobs:
      ceph-rgw-storage-init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      rgw_s3_admin:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
ceph_client:
  configmap: ceph-etc
secrets:
  keyrings:
    mon: ceph-mon-keyring
    mds: ceph-bootstrap-mds-keyring
    osd: ceph-bootstrap-osd-keyring
    rgw: os-ceph-bootstrap-rgw-keyring
    mgr: ceph-bootstrap-mgr-keyring
    admin: pvc-ceph-client-key
  identity:
    admin: ceph-keystone-admin
    swift: ceph-keystone-user
    user_rgw: ceph-keystone-user-rgw
  rgw_s3:
    admin: radosgw-s3-admin-creds
  tls:
    object_store:
      api:
        public: ceph-tls-public
network:
  api:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
        nginx.ingress.kubernetes.io/proxy-body-size: "0"
        nginx.org/proxy-max-temp-file-size: "0"
    external_policy_local: false
    node_port:
      enabled: false
      port: 30004
  public: 192.168.0.0/16
  cluster: 192.168.0.0/16
conf:
  templates:
    keyring:
      admin: |
        [client.admin]
          key = {{ key }}
          auid = 0
          caps mds = "allow"
          caps mon = "allow *"
          caps osd = "allow *"
          caps mgr = "allow *"
      bootstrap:
        rgw: |
          [client.bootstrap-rgw]
            key = {{ key }}
            caps mon = "allow profile bootstrap-rgw"
  features:
    rgw: true
  pool:
    #NOTE(portdirect): this drives a simple approximation of
    # https://ceph.com/pgcalc/, the `target.osd` key should be set to match the
    # expected number of osds in a cluster, and the `target.pg_per_osd` should be
    # set to match the desired number of placement groups on each OSD.
    crush:
      #NOTE(portdirect): to use RBD devices with Ubuntu 16.04's 4.4.x series
      # kernel this should be set to `hammer`
      tunables: null
    target:
      #NOTE(portdirect): arbitrarily we set the default number of expected OSD's to 5
      # to match the number of nodes in the OSH gate.
      osd: 5
      pg_per_osd: 100
    default:
      #NOTE(portdirect): this should be 'same_host' for a single node
      # cluster to be in a healthy state
      crush_rule: replicated_rule
    #NOTE(portdirect): this section describes the pools that will be managed by
    # the ceph pool management job, as it tunes the pgs and crush rule, based on
    # the above.
    spec:
      # RBD pool
      - name: rbd
        application: rbd
        replication: 3
        percent_total_data: 40
      # CephFS pools
      - name: cephfs_metadata
        application: cephfs
        replication: 3
        percent_total_data: 5
      - name: cephfs_data
        application: cephfs
        replication: 3
        percent_total_data: 10
      # RadosGW pools
      - name: .rgw.root
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.control
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.data.root
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.gc
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.log
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.intent-log
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.meta
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.usage
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.keys
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.email
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.swift
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.uid
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.buckets.extra
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.buckets.index
        application: rgw
        replication: 3
        percent_total_data: 3
      - name: default.rgw.buckets.data
        application: rgw
        replication: 3
        percent_total_data: 34.8
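    #NOTE: as a rough worked example of the pgcalc approximation above (the
    # pool management job's exact rounding may differ): with `target.osd: 5`
    # and `target.pg_per_osd: 100`, the default.rgw.buckets.data pool at
    # percent_total_data 34.8 and replication 3 gets approximately
    # (100 * 5 * 0.348) / 3 = 58 placement groups, i.e. 64 once rounded to
    # the nearest power of two.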
  rgw_ks:
    enabled: false
    config:
      rgw_keystone_api_version: 3
      rgw_keystone_accepted_roles: "admin, member"
      rgw_keystone_implicit_tenants: true
      rgw_keystone_make_new_tenants: true
      rgw_s3_auth_use_keystone: true
      rgw_swift_account_in_url: true
      rgw_swift_url: null
      #NOTE (portdirect): See http://tracker.ceph.com/issues/21226
      rgw_keystone_token_cache_size: 0
      #NOTE (JCL): See http://tracker.ceph.com/issues/7073
      rgw_gc_max_objs: 997
      #NOTE (JCL): See http://tracker.ceph.com/issues/24937
      #NOTE (JCL): See https://tracker.ceph.com/issues/24551
      rgw_dynamic_resharding: false
      #NOTE (supamatt): Increase threads and rados handles for better performance
      rgw_thread_pool_size: 512
      rgw_num_rados_handles: 4
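  #NOTE: Keystone integration is disabled by default. Assuming a standard OSH
  # deployment, it is typically switched on with a values override along these
  # lines (adjust to your environment):
  #   deployment:
  #     rgw_keystone_user_and_endpoints: true
  #   conf:
  #     rgw_ks:
  #       enabled: true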
  rgw_s3:
    enabled: false
    admin_caps: "users=*;buckets=*;zone=*"
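  #NOTE: when `rgw_s3.enabled` is true, the s3 admin job is intended to
  # provision an admin user with the caps above, using the credentials under
  # endpoints.ceph_object_store.auth.admin. The effect is broadly equivalent
  # to the following (illustration only, not the chart's actual job script):
  #   radosgw-admin user create --uid=s3_admin --display-name=s3_admin \
  #     --access-key=admin_access_key --secret-key=admin_secret_key
  #   radosgw-admin caps add --uid=s3_admin --caps="users=*;buckets=*;zone=*"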
  ceph:
    global:
      # auth
      cephx: true
      cephx_require_signatures: false
      cephx_cluster_require_signatures: true
      cephx_service_require_signatures: false
      objecter_inflight_op_bytes: "1073741824"
      objecter_inflight_ops: 10240
      debug_ms: "0/0"
    osd:
      osd_mkfs_type: xfs
      osd_mkfs_options_xfs: -f -i size=2048
      osd_max_object_name_len: 256
      ms_bind_port_min: 6800
      ms_bind_port_max: 7100
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - ceph-rgw-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
  static:
    bootstrap:
      jobs: null
      services:
        - endpoint: internal
          service: ceph_mon
    rgw:
      jobs:
        - ceph-rgw-storage-init
      services:
        - endpoint: internal
          service: keystone-api
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry
    ks_endpoints:
      jobs:
        - ceph-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    rgw_s3_admin:
      services:
        - endpoint: internal
          service: ceph_object_store
bootstrap:
  enabled: false
  script: |
    ceph -s
    function ensure_pool () {
      ceph osd pool stats $1 || ceph osd pool create $1 $2
      local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous")
      if [[ ${test_luminous} -gt 0 ]]; then
        ceph osd pool application enable $1 $3
      fi
    }
    #ensure_pool volumes 8 cinder
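#NOTE: the bootstrap job is disabled by default. As an illustration only (pool
# name and pg count below are assumptions), it could be enabled with an
# override such as:
#   bootstrap:
#     enabled: true
# and extra pools created by adding calls like `ensure_pool volumes 8 cinder`
# (pool name, pg count, application) to the script above.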
endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  identity:
    name: keystone
    namespace: null
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      swift:
        role: admin
        region_name: RegionOne
        username: swift
        password: password
        project_name: service
        user_domain_name: service
        project_domain_name: service
    hosts:
      default: keystone
      internal: keystone-api
    host_fqdn_override:
      default: null
    path:
      default: /v3
    scheme:
      default: http
    port:
      api:
        default: 80
        internal: 5000
  object_store:
    name: swift
    namespace: null
    hosts:
      default: ceph-rgw
      public: radosgw
    host_fqdn_override:
      default: null
      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
      # endpoints using the following format:
      # public:
      #   host: null
      #   tls:
      #     crt: null
      #     key: null
    path:
      default: /swift/v1/KEY_$(tenant_id)s
    scheme:
      default: http
    port:
      api:
        default: 8088
        public: 80
  ceph_object_store:
    name: radosgw
    namespace: null
    auth:
      admin:
        # NOTE(srwilkers): These defaults should be used for testing only, and
        # should be changed before deploying to production
        username: s3_admin
        access_key: "admin_access_key"
        secret_key: "admin_secret_key"
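        #NOTE: an illustrative production override (values are placeholders,
        # generate your own credentials):
        #   endpoints:
        #     ceph_object_store:
        #       auth:
        #         admin:
        #           access_key: "<randomly-generated-access-key>"
        #           secret_key: "<randomly-generated-secret-key>"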
    hosts:
      default: ceph-rgw
      public: radosgw
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: http
    port:
      api:
        default: 8088
        public: 80
  ceph_mon:
    namespace: null
    hosts:
      default: ceph-mon
      discovery: ceph-mon-discovery
    host_fqdn_override:
      default: null
    port:
      mon:
        default: 6789
manifests:
  configmap_ceph_templates: true
  configmap_bin: true
  configmap_bin_ks: true
  configmap_etc: true
  deployment_rgw: true
  ingress_rgw: true
  job_ceph_rgw_storage_init: true
  job_image_repo_sync: true
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  job_s3_admin: true
  secret_s3_rgw: true
  secret_keystone_rgw: true
  secret_ingress_tls: true
  secret_keystone: true
  service_ingress_rgw: true
  service_rgw: true