Merge "Merge ceph charm into ceph-mon"

Jenkins 2016-03-29 15:15:29 +00:00 committed by Gerrit Code Review
commit cfd3612b8d
33 changed files with 1169 additions and 100 deletions

.gitignore (vendored, 4 lines changed)

@ -1,8 +1,8 @@
bin
.idea
.coverage
.testrepository
.tox
*.sw[nop]
.idea
*.pyc

View File

@ -1,4 +1,4 @@
[gerrit]
host=review.openstack.org
port=29418
project=openstack/charm-ceph-mon.git

README.md

@ -9,15 +9,15 @@ juju
# Usage
The ceph charm has two pieces of mandatory configuration for which no defaults
are provided. You _must_ set these configuration options before deployment or the charm will not work:
fsid:
uuid specific to a ceph cluster used to ensure that different
clusters don't get mixed up - use `uuid` to generate one.
monitor-secret:
a ceph generated key used by the daemons that manage the cluster
to control security. You can use the ceph-authtool command to
generate one:
ceph-authtool /dev/stdout --name=mon. --gen-key
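As an illustrative sketch (not part of this README), both values can also be generated from Python, assuming a local ceph-authtool binary:

import subprocess
import uuid

fsid = str(uuid.uuid4())  # random uuid for the cluster fsid
monitor_secret = subprocess.check_output(
    ['ceph-authtool', '/dev/stdout', '--name=mon.', '--gen-key'])
print fsid
print monitor_secret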
@ -30,7 +30,7 @@ At a minimum you must provide a juju config file during initial deployment
with the fsid and monitor-secret options (contents of ceph.yaml below):
ceph:
fsid: ecbb8960-0e21-11e2-b495-83a88f44db01
monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg==
Boot things up by using:

actions.yaml

@ -39,3 +39,178 @@ remove-cache-tier:
as the hot pool
required: [backer-pool, cache-pool]
additionalProperties: false
create-pool:
description: Creates a pool
params:
name:
type: string
description: The name of the pool
profile-name:
type: string
description: The crush profile to use for this pool. The ruleset must exist first.
pool-type:
type: string
default: "replicated"
enum: [replicated, erasure]
description: |
The pool type which may either be replicated to recover from lost OSDs by keeping multiple copies of the
objects or erasure to get a kind of generalized RAID5 capability.
replicas:
type: integer
default: 3
description: |
For the replicated pool this is the number of replicas to store of each object.
erasure-profile-name:
type: string
default: default
description: |
The name of the erasure coding profile to use for this pool. Note this profile must exist
before calling create-pool
required: [name]
additionalProperties: false
create-erasure-profile:
description: Create a new erasure code profile to use on a pool.
params:
name:
type: string
description: The name of the profile
failure-domain:
type: string
default: host
enum: [chassis, datacenter, host, osd, pdu, pod, rack, region, room, root, row]
description: |
Setting failure-domain=host creates a CRUSH ruleset that ensures no two chunks are stored in the same host.
plugin:
type: string
default: "jerasure"
enum: [jerasure, isa, lrc, shec]
description: |
The erasure plugin to use for this profile.
See http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ for more details
data-chunks:
type: integer
default: 3
description: |
The number of data chunks, i.e. the number of chunks in which the original object is divided. For instance
if K = 2 a 10KB object will be divided into K objects of 5KB each.
coding-chunks:
type: integer
default: 2
description: |
The number of coding chunks, i.e. the number of additional chunks computed by the encoding functions.
If there are 2 coding chunks, it means 2 OSDs can be out without losing data.
locality-chunks:
type: integer
description: |
Group the coding and data chunks into sets of size locality. For instance, for k=4 and m=2, when locality=3
two groups of three are created. Each set can be recovered without reading chunks from another set.
durability-estimator:
type: integer
description: |
The number of parity chunks each of which includes each data chunk in its calculation range. The number is used
as a durability estimator. For instance, if c=2, 2 OSDs can be down without losing data.
required: [name, data-chunks, coding-chunks]
additionalProperties: false
get-erasure-profile:
description: Display an erasure code profile.
params:
name:
type: string
description: The name of the profile
required: [name]
additionalProperties: false
delete-erasure-profile:
description: Deletes an erasure code profile.
params:
name:
type: string
description: The name of the profile
required: [name]
additionalProperties: false
list-erasure-profiles:
description: List the names of all erasure code profiles
additionalProperties: false
list-pools:
description: List your cluster's pools
additionalProperties: false
set-pool-max-bytes:
description: Set pool quotas for the maximum number of bytes.
params:
max:
type: integer
description: The maximum number of bytes to set as the pool's quota
pool-name:
type: string
description: The name of the pool
required: [pool-name, max]
additionalProperties: false
delete-pool:
description: Deletes the named pool
params:
pool-name:
type: string
description: The name of the pool
required: [pool-name]
additionalProperties: false
rename-pool:
description: Rename a pool
params:
pool-name:
type: string
description: The name of the pool
new-name:
type: string
description: The new name of the pool
required: [pool-name, new-name]
additionalProperties: false
pool-statistics:
description: Show a pool's utilization statistics
additionalProperties: false
snapshot-pool:
description: Snapshot a pool
params:
pool-name:
type: string
description: The name of the pool
snapshot-name:
type: string
description: The name of the snapshot
required: [snapshot-name, pool-name]
additionalProperties: false
remove-pool-snapshot:
description: Remove a pool snapshot
params:
pool-name:
type: string
description: The name of the pool
snapshot-name:
type: string
description: The name of the snapshot
required: [snapshot-name, pool-name]
additionalProperties: false
pool-set:
description: Set a value for the pool
params:
pool-name:
type: string
description: The pool to set this variable on.
key:
type: string
description: Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
value:
type: string
description: The value to set
required: [key, value, pool-name]
additionalProperties: false
pool-get:
description: Get a value for the pool
params:
pool-name:
type: string
description: The pool to get this variable from.
key:
type: string
description: Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#get-pool-values
required: [key, pool-name]
additionalProperties: false
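To make the data-chunks/coding-chunks arithmetic in the schema above concrete, here is a small worked example (illustrative Python, not shipped with the charm):

# k data chunks plus m coding chunks; up to m OSDs can fail without data loss.
object_size_kb = 10
k, m = 2, 2
chunk_size_kb = object_size_kb / k  # 5KB per data chunk, as described above
total_chunks = k + m                # 4 chunks spread across failure domains
raw_overhead = float(k + m) / k     # 2.0x raw space, vs 3.0x for replicas=3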

actions/__init__.py

@ -1 +1,3 @@
__author__ = 'chris'
import sys
sys.path.append('hooks')

actions/ceph_ops.py (new executable file, 103 lines)

@ -0,0 +1,103 @@
__author__ = 'chris'
from subprocess import CalledProcessError, check_output
import sys
sys.path.append('hooks')
import rados
from charmhelpers.core.hookenv import log, action_get, action_fail
from charmhelpers.contrib.storage.linux.ceph import pool_set, \
set_pool_quota, snapshot_pool, remove_pool_snapshot
# Connect to Ceph via Librados and return a connection
def connect():
try:
cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
cluster.connect()
return cluster
except (rados.IOError,
rados.ObjectNotFound,
rados.NoData,
rados.NoSpace,
rados.PermissionError) as rados_error:
log("librados failed with error: {}".format(str(rados_error)))
def create_crush_rule():
    # Stub: would shell out to something like
    # 'ceph osd crush rule create-simple <name> <root> <type>'
    pass
def list_pools():
try:
cluster = connect()
pool_list = cluster.list_pools()
cluster.shutdown()
return pool_list
except (rados.IOError,
rados.ObjectNotFound,
rados.NoData,
rados.NoSpace,
rados.PermissionError) as e:
action_fail(e.message)
def pool_get():
key = action_get("key")
pool_name = action_get("pool_name")
try:
value = check_output(['ceph', 'osd', 'pool', 'get', pool_name, key])
return value
except CalledProcessError as e:
action_fail(e.message)
def set_pool():
key = action_get("key")
value = action_get("value")
pool_name = action_get("pool_name")
pool_set(service='ceph', pool_name=pool_name, key=key, value=value)
def pool_stats():
try:
pool_name = action_get("pool-name")
cluster = connect()
ioctx = cluster.open_ioctx(pool_name)
stats = ioctx.get_stats()
ioctx.close()
cluster.shutdown()
return stats
except (rados.Error,
rados.IOError,
rados.ObjectNotFound,
rados.NoData,
rados.NoSpace,
rados.PermissionError) as e:
action_fail(e.message)
def delete_pool_snapshot():
pool_name = action_get("pool-name")
snapshot_name = action_get("snapshot-name")
remove_pool_snapshot(service='ceph',
pool_name=pool_name,
snapshot_name=snapshot_name)
# Note: quotas can limit max bytes or max objects; only max bytes is set here
def set_pool_max_bytes():
pool_name = action_get("pool-name")
max_bytes = action_get("max")
set_pool_quota(service='ceph',
pool_name=pool_name,
max_bytes=max_bytes)
def snapshot_ceph_pool():
pool_name = action_get("pool-name")
snapshot_name = action_get("snapshot-name")
snapshot_pool(service='ceph',
pool_name=pool_name,
snapshot_name=snapshot_name)

actions/create-erasure-profile (new executable file, 89 lines)

@ -0,0 +1,89 @@
#!/usr/bin/python
from subprocess import CalledProcessError
import sys
sys.path.append('hooks')
from charmhelpers.contrib.storage.linux.ceph import create_erasure_profile
from charmhelpers.core.hookenv import action_get, log, action_fail
def make_erasure_profile():
name = action_get("name")
plugin = action_get("plugin")
failure_domain = action_get("failure-domain")
# jerasure requires k+m
# isa requires k+m
# local requires k+m+l
# shec requires k+m+c
if plugin == "jerasure":
k = action_get("data-chunks")
m = action_get("coding-chunks")
try:
create_erasure_profile(service='admin',
erasure_plugin_name=plugin,
profile_name=name,
data_chunks=k,
coding_chunks=m,
failure_domain=failure_domain)
except CalledProcessError as e:
log(e)
action_fail("Create erasure profile failed with "
"message: {}".format(e.message))
elif plugin == "isa":
k = action_get("data-chunks")
m = action_get("coding-chunks")
try:
create_erasure_profile(service='admin',
erasure_plugin_name=plugin,
profile_name=name,
data_chunks=k,
coding_chunks=m,
failure_domain=failure_domain)
except CalledProcessError as e:
log(e)
action_fail("Create erasure profile failed with "
"message: {}".format(e.message))
elif plugin == "local":
k = action_get("data-chunks")
m = action_get("coding-chunks")
l = action_get("locality-chunks")
try:
create_erasure_profile(service='admin',
erasure_plugin_name=plugin,
profile_name=name,
data_chunks=k,
coding_chunks=m,
locality=l,
failure_domain=failure_domain)
except CalledProcessError as e:
log(e)
action_fail("Create erasure profile failed with "
"message: {}".format(e.message))
elif plugin == "shec":
k = action_get("data-chunks")
m = action_get("coding-chunks")
c = action_get("durability-estimator")
try:
create_erasure_profile(service='admin',
erasure_plugin_name=plugin,
profile_name=name,
data_chunks=k,
coding_chunks=m,
durability_estimator=c,
failure_domain=failure_domain)
except CalledProcessError as e:
log(e)
action_fail("Create erasure profile failed with "
"message: {}".format(e.message))
else:
# Unknown erasure plugin
action_fail("Unknown erasure-plugin type of {}. "
"Only jerasure, isa, local or shec is "
"allowed".format(plugin))
if __name__ == '__main__':
make_erasure_profile()
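The four plugin branches above pass the same core arguments to create_erasure_profile and differ only in one optional keyword. A table-driven sketch of the same dispatch (a hypothetical refactor reusing the imports above, not what this commit ships):

# Maps plugin name to (create_erasure_profile kwarg, action parameter name).
EXTRA_KWARG = {
    'jerasure': None,
    'isa': None,
    'local': ('locality', 'locality-chunks'),
    'shec': ('durability_estimator', 'durability-estimator'),
}

def profile_kwargs(plugin):
    kwargs = {'data_chunks': action_get('data-chunks'),
              'coding_chunks': action_get('coding-chunks')}
    extra = EXTRA_KWARG.get(plugin)
    if extra:
        kwargs[extra[0]] = action_get(extra[1])
    return kwargs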

actions/create-pool (new executable file, 38 lines)

@ -0,0 +1,38 @@
#!/usr/bin/python
import sys
sys.path.append('hooks')
from subprocess import CalledProcessError
from charmhelpers.core.hookenv import action_get, log, action_fail
from charmhelpers.contrib.storage.linux.ceph import ErasurePool, ReplicatedPool
def create_pool():
pool_name = action_get("name")
pool_type = action_get("pool-type")
try:
if pool_type == "replicated":
replicas = action_get("replicas")
replicated_pool = ReplicatedPool(name=pool_name,
service='admin',
replicas=replicas)
replicated_pool.create()
elif pool_type == "erasure":
crush_profile_name = action_get("erasure-profile-name")
erasure_pool = ErasurePool(name=pool_name,
erasure_code_profile=crush_profile_name,
service='admin')
erasure_pool.create()
else:
log("Unknown pool type of {}. Only erasure or replicated is "
"allowed".format(pool_type))
action_fail("Unknown pool type of {}. Only erasure or replicated "
"is allowed".format(pool_type))
except CalledProcessError as e:
action_fail("Pool creation failed because of a failed process. "
"Ret Code: {} Message: {}".format(e.returncode, e.message))
if __name__ == '__main__':
create_pool()

actions/delete-erasure-profile (new executable file, 24 lines)

@ -0,0 +1,24 @@
#!/usr/bin/python
from subprocess import CalledProcessError
__author__ = 'chris'
import sys
sys.path.append('hooks')
from charmhelpers.contrib.storage.linux.ceph import remove_erasure_profile
from charmhelpers.core.hookenv import action_get, log, action_fail
def delete_erasure_profile():
name = action_get("name")
try:
remove_erasure_profile(service='admin', profile_name=name)
except CalledProcessError as e:
action_fail("Remove erasure profile failed with error: {}".format(
e.message))
if __name__ == '__main__':
delete_erasure_profile()

actions/delete-pool (new executable file, 28 lines)

@ -0,0 +1,28 @@
#!/usr/bin/python
import sys
sys.path.append('hooks')
import rados
from ceph_ops import connect
from charmhelpers.core.hookenv import action_get, log, action_fail
def remove_pool():
try:
pool_name = action_get("name")
cluster = connect()
log("Deleting pool: {}".format(pool_name))
cluster.delete_pool(str(pool_name)) # Convert from unicode
cluster.shutdown()
except (rados.IOError,
rados.ObjectNotFound,
rados.NoData,
rados.NoSpace,
rados.PermissionError) as e:
log(e)
action_fail(e)
if __name__ == '__main__':
remove_pool()

actions/get-erasure-profile (new executable file, 18 lines)

@ -0,0 +1,18 @@
#!/usr/bin/python
__author__ = 'chris'
import sys
sys.path.append('hooks')
from charmhelpers.contrib.storage.linux.ceph import get_erasure_profile
from charmhelpers.core.hookenv import action_get, action_set
def make_erasure_profile():
name = action_get("name")
out = get_erasure_profile(service='admin', name=name)
action_set({'message': out})
if __name__ == '__main__':
make_erasure_profile()

actions/list-erasure-profiles (new executable file, 22 lines)

@ -0,0 +1,22 @@
#!/usr/bin/python
__author__ = 'chris'
import sys
from subprocess import check_output, CalledProcessError
sys.path.append('hooks')
from charmhelpers.core.hookenv import action_get, log, action_set, action_fail
if __name__ == '__main__':
name = action_get("name")
try:
out = check_output(['ceph',
'--id', 'admin',
'osd',
'erasure-code-profile',
'ls']).decode('UTF-8')
action_set({'message': out})
except CalledProcessError as e:
log(e)
action_fail("Listing erasure profiles failed with error: {}".format(
e.message))

actions/list-pools (new executable file, 17 lines)

@ -0,0 +1,17 @@
#!/usr/bin/python
__author__ = 'chris'
import sys
from subprocess import check_output, CalledProcessError
sys.path.append('hooks')
from charmhelpers.core.hookenv import log, action_set, action_fail
if __name__ == '__main__':
try:
out = check_output(['ceph', '--id', 'admin',
'osd', 'lspools']).decode('UTF-8')
action_set({'message': out})
except CalledProcessError as e:
log(e)
action_fail("List pools failed with error: {}".format(e.message))

actions/pool-get (new executable file, 19 lines)

@ -0,0 +1,19 @@
#!/usr/bin/python
__author__ = 'chris'
import sys
from subprocess import check_output, CalledProcessError
sys.path.append('hooks')
from charmhelpers.core.hookenv import log, action_set, action_get, action_fail
if __name__ == '__main__':
name = action_get('pool-name')
key = action_get('key')
try:
out = check_output(['ceph', '--id', 'admin',
'osd', 'pool', 'get', name, key]).decode('UTF-8')
action_set({'message': out})
except CalledProcessError as e:
log(e)
action_fail("Pool get failed with message: {}".format(e.message))

actions/pool-set (new executable file, 23 lines)

@ -0,0 +1,23 @@
#!/usr/bin/python
from subprocess import CalledProcessError
import sys
sys.path.append('hooks')
from charmhelpers.core.hookenv import action_get, log, action_fail
from ceph_broker import handle_set_pool_value
if __name__ == '__main__':
name = action_get("pool-name")
key = action_get("key")
value = action_get("value")
request = {'name': name,
'key': key,
'value': value}
try:
handle_set_pool_value(service='admin', request=request)
except CalledProcessError as e:
log(e.message)
action_fail("Setting pool key: {} and value: {} failed with "
"message: {}".format(key, value, e.message))

actions/pool-statistics (new executable file, 15 lines)

@ -0,0 +1,15 @@
#!/usr/bin/python
import sys
sys.path.append('hooks')
from subprocess import check_output, CalledProcessError
from charmhelpers.core.hookenv import log, action_set, action_fail
if __name__ == '__main__':
try:
out = check_output(['ceph', '--id', 'admin',
'df']).decode('UTF-8')
action_set({'message': out})
except CalledProcessError as e:
log(e)
action_fail("ceph df failed with message: {}".format(e.message))

actions/remove-pool-snapshot (new executable file, 19 lines)

@ -0,0 +1,19 @@
#!/usr/bin/python
import sys
sys.path.append('hooks')
from subprocess import CalledProcessError
from charmhelpers.core.hookenv import action_get, log, action_fail
from charmhelpers.contrib.storage.linux.ceph import remove_pool_snapshot
if __name__ == '__main__':
name = action_get("pool-name")
snapname = action_get("snapshot-name")
try:
remove_pool_snapshot(service='admin',
pool_name=name,
snapshot_name=snapname)
except CalledProcessError as e:
log(e)
action_fail("Remove pool snapshot failed with message: {}".format(
e.message))

actions/rename-pool (new executable file, 16 lines)

@ -0,0 +1,16 @@
#!/usr/bin/python
import sys
sys.path.append('hooks')
from subprocess import CalledProcessError
from charmhelpers.core.hookenv import action_get, log, action_fail
from charmhelpers.contrib.storage.linux.ceph import rename_pool
if __name__ == '__main__':
name = action_get("pool-name")
new_name = action_get("new-name")
try:
rename_pool(service='admin', old_name=name, new_name=new_name)
except CalledProcessError as e:
log(e)
action_fail("Renaming pool failed with message: {}".format(e.message))

actions/set-pool-max-bytes (new executable file, 16 lines)

@ -0,0 +1,16 @@
#!/usr/bin/python
import sys
sys.path.append('hooks')
from subprocess import CalledProcessError
from charmhelpers.core.hookenv import action_get, log, action_fail
from charmhelpers.contrib.storage.linux.ceph import set_pool_quota
if __name__ == '__main__':
max_bytes = action_get("max")
name = action_get("pool-name")
try:
set_pool_quota(service='admin', pool_name=name, max_bytes=max_bytes)
except CalledProcessError as e:
log(e)
action_fail("Set pool quota failed with message: {}".format(e.message))

actions/snapshot-pool (new executable file, 18 lines)

@ -0,0 +1,18 @@
#!/usr/bin/python
import sys
sys.path.append('hooks')
from subprocess import CalledProcessError
from charmhelpers.core.hookenv import action_get, log, action_fail
from charmhelpers.contrib.storage.linux.ceph import snapshot_pool
if __name__ == '__main__':
name = action_get("pool-name")
snapname = action_get("snapshot-name")
try:
snapshot_pool(service='admin',
pool_name=name,
snapshot_name=snapname)
except CalledProcessError as e:
log(e)
action_fail("Snapshot pool failed with message: {}".format(e.message))

config.yaml

@ -121,3 +121,7 @@ options:
description: |
A comma-separated list of nagios servicegroups.
If left empty, the nagios_context will be used as the servicegroup
use-direct-io:
default: True
type: boolean
description: Configure use of direct IO for OSD journals.
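The new option is consumed by emit_cephconf in hooks/ceph_hooks.py later in this diff, which lowers the boolean into the template context:

str(True).lower()  # -> 'true', the string form rendered into ceph.conf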

hooks/ceph_broker.py

@ -1,24 +1,71 @@
#!/usr/bin/python
#
# Copyright 2015 Canonical Ltd.
#
import json
from charmhelpers.contrib.storage.linux.ceph import validator, \
erasure_profile_exists, ErasurePool, set_pool_quota, \
pool_set, snapshot_pool, remove_pool_snapshot, create_erasure_profile, \
ReplicatedPool, rename_pool, Pool, get_osds, pool_exists, delete_pool
from charmhelpers.core.hookenv import (
log,
DEBUG,
INFO,
ERROR,
)
# This comes from http://docs.ceph.com/docs/master/rados/operations/pools/
# This should do a decent job of preventing people from passing in bad values.
# It will give a useful error message
POOL_KEYS = {
# "Ceph Key Name": [Python type, [Valid Range]]
"size": [int],
"min_size": [int],
"crash_replay_interval": [int],
"pgp_num": [int], # = or < pg_num
"crush_ruleset": [int],
"hashpspool": [bool],
"nodelete": [bool],
"nopgchange": [bool],
"nosizechange": [bool],
"write_fadvise_dontneed": [bool],
"noscrub": [bool],
"nodeep-scrub": [bool],
"hit_set_type": [basestring, ["bloom", "explicit_hash",
"explicit_object"]],
"hit_set_count": [int, [1, 1]],
"hit_set_period": [int],
"hit_set_fpp": [float, [0.0, 1.0]],
"cache_target_dirty_ratio": [float],
"cache_target_dirty_high_ratio": [float],
"cache_target_full_ratio": [float],
"target_max_bytes": [int],
"target_max_objects": [int],
"cache_min_flush_age": [int],
"cache_min_evict_age": [int],
"fast_read": [bool],
}
CEPH_BUCKET_TYPES = [
'osd',
'host',
'chassis',
'rack',
'row',
'pdu',
'pod',
'room',
'datacenter',
'region',
'root'
]
def decode_req_encode_rsp(f):
"""Decorator to decode incoming requests and encode responses."""
def decode_inner(req):
return json.dumps(f(json.loads(req)))
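# Illustrative request/response shapes (annotation, not part of the file;
# they mirror the unit tests later in this diff):
#   incoming req:  '{"api-version": 1, "ops": [{"op": "create-pool", ...}]}'
#   outgoing resp: '{"exit-code": 0}' (plus 'request-id' when one was given)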
@ -42,15 +89,14 @@ def process_requests(reqs):
resp['request-id'] = request_id
return resp
except Exception as exc:
log(str(exc), level=ERROR)
msg = ("Unexpected error occurred while processing requests: %s" %
               reqs)
log(msg, level=ERROR)
return {'exit-code': 1, 'stderr': msg}
msg = ("Missing or invalid api version (%s)" % (version))
msg = ("Missing or invalid api version (%s)" % version)
resp = {'exit-code': 1, 'stderr': msg}
if request_id:
resp['request-id'] = request_id
@ -58,6 +104,156 @@ def process_requests(reqs):
return resp
def handle_create_erasure_profile(request, service):
# "local" | "shec" or it defaults to "jerasure"
erasure_type = request.get('erasure-type')
# "host" | "rack" or it defaults to "host" # Any valid Ceph bucket
failure_domain = request.get('failure-domain')
name = request.get('name')
k = request.get('k')
m = request.get('m')
l = request.get('l')
if failure_domain not in CEPH_BUCKET_TYPES:
msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES)
log(msg, level=ERROR)
return {'exit-code': 1, 'stderr': msg}
create_erasure_profile(service=service, erasure_plugin_name=erasure_type,
profile_name=name, failure_domain=failure_domain,
data_chunks=k, coding_chunks=m, locality=l)
def handle_erasure_pool(request, service):
pool_name = request.get('name')
erasure_profile = request.get('erasure-profile')
quota = request.get('max-bytes')
if erasure_profile is None:
erasure_profile = "default-canonical"
# Check for missing params
if pool_name is None:
msg = "Missing parameter. name is required for the pool"
log(msg, level=ERROR)
return {'exit-code': 1, 'stderr': msg}
# TODO: Default to 3/2 erasure coding. I believe this requires min 5 osds
if not erasure_profile_exists(service=service, name=erasure_profile):
# TODO: Fail and tell them to create the profile or default
msg = "erasure-profile {} does not exist. Please create it with: " \
"create-erasure-profile".format(erasure_profile)
log(msg, level=ERROR)
return {'exit-code': 1, 'stderr': msg}
pool = ErasurePool(service=service, name=pool_name,
erasure_code_profile=erasure_profile)
# Ok make the erasure pool
if not pool_exists(service=service, name=pool_name):
log("Creating pool '%s' (erasure_profile=%s)" % (pool,
erasure_profile),
level=INFO)
pool.create()
# Set a quota if requested
if quota is not None:
set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota)
def handle_replicated_pool(request, service):
pool_name = request.get('name')
replicas = request.get('replicas')
quota = request.get('max-bytes')
# Optional params
pg_num = request.get('pg_num')
if pg_num:
# Cap pg_num to max allowed just in case.
osds = get_osds(service)
if osds:
pg_num = min(pg_num, (len(osds) * 100 // replicas))
# Check for missing params
if pool_name is None or replicas is None:
msg = "Missing parameter. name and replicas are required"
log(msg, level=ERROR)
return {'exit-code': 1, 'stderr': msg}
pool = ReplicatedPool(service=service,
name=pool_name,
replicas=replicas,
pg_num=pg_num)
if not pool_exists(service=service, name=pool_name):
log("Creating pool '%s' (replicas=%s)" % (pool, replicas),
level=INFO)
pool.create()
else:
log("Pool '%s' already exists - skipping create" % pool,
level=DEBUG)
# Set a quota if requested
if quota is not None:
set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota)
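# Worked example of the pg_num cap above (annotation, not part of the file):
# with 3 OSDs and replicas=3 the ceiling is len(osds) * 100 // replicas,
# i.e. 3 * 100 // 3 = 100, so a requested pg_num of 300 is reduced to 100;
# exactly the case test_process_requests_create_pool_w_pg_num_capped checks.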
def handle_create_cache_tier(request, service):
# mode = "writeback" | "readonly"
storage_pool = request.get('cold-pool')
cache_pool = request.get('hot-pool')
cache_mode = request.get('mode')
if cache_mode is None:
cache_mode = "writeback"
# cache and storage pool must exist first
if not pool_exists(service=service, name=storage_pool) or not pool_exists(
service=service, name=cache_pool):
msg = "cold-pool: {} and hot-pool: {} must exist. Please create " \
"them first".format(storage_pool, cache_pool)
log(msg, level=ERROR)
return {'exit-code': 1, 'stderr': msg}
p = Pool(service=service, name=storage_pool)
p.add_cache_tier(cache_pool=cache_pool, mode=cache_mode)
def handle_remove_cache_tier(request, service):
storage_pool = request.get('cold-pool')
cache_pool = request.get('hot-pool')
# cache and storage pool must exist first
if not pool_exists(service=service, name=storage_pool) or not pool_exists(
service=service, name=cache_pool):
msg = "cold-pool: {} or hot-pool: {} doesn't exist. Not " \
"deleting cache tier".format(storage_pool, cache_pool)
log(msg, level=ERROR)
return {'exit-code': 1, 'stderr': msg}
pool = Pool(name=storage_pool, service=service)
pool.remove_cache_tier(cache_pool=cache_pool)
def handle_set_pool_value(request, service):
# Set arbitrary pool values
params = {'pool': request.get('name'),
'key': request.get('key'),
'value': request.get('value')}
if params['key'] not in POOL_KEYS:
msg = "Invalid key '%s'" % params['key']
log(msg, level=ERROR)
return {'exit-code': 1, 'stderr': msg}
# Get the validation method
validator_params = POOL_KEYS[params['key']]
    if len(validator_params) == 1:
# Validate that what the user passed is actually legal per Ceph's rules
validator(params['value'], validator_params[0])
else:
# Validate that what the user passed is actually legal per Ceph's rules
validator(params['value'], validator_params[0], validator_params[1])
# Set the value
pool_set(service=service, pool_name=params['pool'], key=params['key'],
value=params['value'])
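# Example request reaching handle_set_pool_value (annotation, not file code):
#   {'op': 'set-pool-value', 'name': 'foo', 'key': 'size', 'value': 3}
# 'size' maps to [int] in POOL_KEYS, so validator() type-checks the value
# before pool_set() applies it with the Ceph CLI.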
def process_requests_v1(reqs):
"""Process v1 requests.
@ -70,45 +266,45 @@ def process_requests_v1(reqs):
log("Processing %s ceph broker requests" % (len(reqs)), level=INFO)
for req in reqs:
op = req.get('op')
log("Processing op='%s'" % (op), level=DEBUG)
log("Processing op='%s'" % op, level=DEBUG)
# Use admin client since we do not have other client key locations
# setup to use them for these operations.
svc = 'admin'
if op == "create-pool":
            pool_type = req.get('pool-type')  # "replicated" | "erasure"

            # Default to replicated if pool_type isn't given
            if pool_type == 'erasure':
                handle_erasure_pool(request=req, service=svc)
            else:
                handle_replicated_pool(request=req, service=svc)
elif op == "create-cache-tier":
handle_create_cache_tier(request=req, service=svc)
elif op == "remove-cache-tier":
handle_remove_cache_tier(request=req, service=svc)
elif op == "create-erasure-profile":
handle_create_erasure_profile(request=req, service=svc)
elif op == "delete-pool":
pool = req.get('name')
delete_pool(service=svc, name=pool)
elif op == "rename-pool":
old_name = req.get('name')
new_name = req.get('new-name')
rename_pool(service=svc, old_name=old_name, new_name=new_name)
elif op == "snapshot-pool":
pool = req.get('name')
snapshot_name = req.get('snapshot-name')
snapshot_pool(service=svc, pool_name=pool,
snapshot_name=snapshot_name)
elif op == "remove-pool-snapshot":
pool = req.get('name')
snapshot_name = req.get('snapshot-name')
remove_pool_snapshot(service=svc, pool_name=pool,
snapshot_name=snapshot_name)
elif op == "set-pool-value":
handle_set_pool_value(request=req, service=svc)
else:
msg = "Unknown operation '%s'" % (op)
msg = "Unknown operation '%s'" % op
log(msg, level=ERROR)
return {'exit-code': 1, 'stderr': msg}

hooks/ceph_hooks.py

@ -54,7 +54,7 @@ from charmhelpers.payload.execd import execd_preinstall
from charmhelpers.contrib.openstack.alternatives import install_alternative
from charmhelpers.contrib.network.ip import (
get_ipv6_addr,
    format_ipv6_addr,
)
from charmhelpers.core.sysctl import create as create_sysctl
from charmhelpers.core.templating import render
@ -294,6 +294,7 @@ def emit_cephconf():
'ceph_public_network': public_network,
'ceph_cluster_network': cluster_network,
'loglevel': config('loglevel'),
'dio': str(config('use-direct-io')).lower(),
}
if config('prefer-ipv6'):

templates/ceph.conf

@ -36,4 +36,3 @@ keyring = /var/lib/ceph/mon/$cluster-$id/keyring
[mds]
keyring = /var/lib/ceph/mds/$cluster-$id/keyring

tests/018-basic-trusty-liberty (mode change only: normal file → executable file)

tests/019-basic-trusty-mitaka (mode change only: normal file → executable file)

tests/020-basic-wily-liberty (mode change only: normal file → executable file)

tests/021-basic-xenial-mitaka (mode change only: normal file → executable file)

tests/basic_deployment.py

@ -3,6 +3,7 @@
import amulet
import re
import time
from charmhelpers.contrib.openstack.amulet.deployment import (
OpenStackAmuletDeployment
)
@ -30,6 +31,8 @@ class CephBasicDeployment(OpenStackAmuletDeployment):
u.log.info('Waiting on extended status checks...')
exclude_services = ['mysql']
# Wait for deployment ready msgs, except exclusions
self._auto_wait_for_status(exclude_services=exclude_services)
self._initialize_tests()
@ -79,6 +82,9 @@ class CephBasicDeployment(OpenStackAmuletDeployment):
'admin-token': 'ubuntutesting'}
mysql_config = {'dataset-size': '50%'}
cinder_config = {'block-device': 'None', 'glance-api-version': '2'}
# Include a non-existent device as osd-devices is a whitelist,
# and this will catch cases where proposals attempt to change that.
ceph_config = {
'monitor-count': '3',
'auth-supported': 'none',
@ -198,7 +204,6 @@ class CephBasicDeployment(OpenStackAmuletDeployment):
self.cinder_sentry: ['cinder-api',
'cinder-scheduler',
'cinder-volume'],
}
if self._get_openstack_release() < self.vivid_kilo:
@ -212,6 +217,13 @@ class CephBasicDeployment(OpenStackAmuletDeployment):
services[self.ceph1_sentry] = ceph_services
services[self.ceph2_sentry] = ceph_services
ceph_osd_services = [
'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)),
'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1))
]
services[self.ceph_osd_sentry] = ceph_osd_services
ret = u.validate_services_by_name(services)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)

tests/tests.yaml

@ -19,3 +19,4 @@ packages:
- python-novaclient
- python-pika
- python-swiftclient
- python-nose

unit_tests/test_ceph_broker.py

@ -1,12 +1,12 @@
import json
import unittest

import mock

import ceph_broker
class CephBrokerTestCase(unittest.TestCase):
def setUp(self):
super(CephBrokerTestCase, self).setUp()
@ -20,15 +20,15 @@ class CephBrokerTestCase(unittest.TestCase):
def test_process_requests_missing_api_version(self, mock_log):
req = json.dumps({'ops': []})
rc = ceph_broker.process_requests(req)
        self.assertEqual(json.loads(rc), {
            'exit-code': 1,
            'stderr': 'Missing or invalid api version (None)'})
@mock.patch('ceph_broker.log')
def test_process_requests_invalid_api_version(self, mock_log):
req = json.dumps({'api-version': 2, 'ops': []})
rc = ceph_broker.process_requests(req)
print "Return: %s" % rc
self.assertEqual(json.loads(rc),
{'exit-code': 1,
'stderr': 'Missing or invalid api version (2)'})
@ -41,90 +41,88 @@ class CephBrokerTestCase(unittest.TestCase):
{'exit-code': 1,
'stderr': "Unknown operation 'invalid_op'"})
@mock.patch('ceph_broker.get_osds')
    @mock.patch('ceph_broker.ReplicatedPool')
@mock.patch('ceph_broker.pool_exists')
@mock.patch('ceph_broker.log')
    def test_process_requests_create_pool_w_pg_num(self, mock_log,
                                                   mock_pool_exists,
                                                   mock_replicated_pool,
                                                   mock_get_osds):
mock_get_osds.return_value = [0, 1, 2]
mock_pool_exists.return_value = False
reqs = json.dumps({'api-version': 1,
                           'ops': [{
                               'op': 'create-pool',
                               'name': 'foo',
                               'replicas': 3,
                               'pg_num': 100}]})
rc = ceph_broker.process_requests(reqs)
mock_pool_exists.assert_called_with(service='admin', name='foo')
        mock_replicated_pool.assert_called_with(service='admin', name='foo',
                                                replicas=3, pg_num=100)
self.assertEqual(json.loads(rc), {'exit-code': 0})
@mock.patch('ceph_broker.get_osds')
    @mock.patch('ceph_broker.ReplicatedPool')
@mock.patch('ceph_broker.pool_exists')
@mock.patch('ceph_broker.log')
    def test_process_requests_create_pool_w_pg_num_capped(self, mock_log,
                                                          mock_pool_exists,
                                                          mock_replicated_pool,
                                                          mock_get_osds):
mock_get_osds.return_value = [0, 1, 2]
mock_pool_exists.return_value = False
reqs = json.dumps({'api-version': 1,
                           'ops': [{
                               'op': 'create-pool',
                               'name': 'foo',
                               'replicas': 3,
                               'pg_num': 300}]})
rc = ceph_broker.process_requests(reqs)
        mock_pool_exists.assert_called_with(service='admin',
                                            name='foo')
        mock_replicated_pool.assert_called_with(service='admin', name='foo',
                                                replicas=3, pg_num=100)
        self.assertEqual(json.loads(rc), {'exit-code': 0})
    @mock.patch('ceph_broker.ReplicatedPool')
@mock.patch('ceph_broker.pool_exists')
@mock.patch('ceph_broker.log')
def test_process_requests_create_pool_exists(self, mock_log,
mock_pool_exists,
                                                 mock_replicated_pool):
mock_pool_exists.return_value = True
reqs = json.dumps({'api-version': 1,
                           'ops': [{'op': 'create-pool',
                                    'name': 'foo',
'replicas': 3}]})
rc = ceph_broker.process_requests(reqs)
        mock_pool_exists.assert_called_with(service='admin',
                                            name='foo')
        self.assertFalse(mock_replicated_pool.create.called)
self.assertEqual(json.loads(rc), {'exit-code': 0})
    @mock.patch('ceph_broker.ReplicatedPool')
@mock.patch('ceph_broker.pool_exists')
@mock.patch('ceph_broker.log')
    def test_process_requests_create_pool_rid(self, mock_log,
                                              mock_pool_exists,
                                              mock_replicated_pool):
mock_pool_exists.return_value = False
reqs = json.dumps({'api-version': 1,
'request-id': '1ef5aede',
                           'ops': [{
                               'op': 'create-pool',
                               'name': 'foo',
                               'replicas': 3}]})
rc = ceph_broker.process_requests(reqs)
mock_pool_exists.assert_called_with(service='admin', name='foo')
        mock_replicated_pool.assert_called_with(service='admin',
                                                name='foo',
                                                pg_num=None,
                                                replicas=3)
self.assertEqual(json.loads(rc)['exit-code'], 0)
self.assertEqual(json.loads(rc)['request-id'], '1ef5aede')

unit_tests/test_ceph_ops.py (new file, 217 lines)

@ -0,0 +1,217 @@
__author__ = 'chris'
import json
from hooks import ceph_broker
import mock
import unittest
class TestCephOps(unittest.TestCase):
"""
@mock.patch('ceph_broker.log')
def test_connect(self, mock_broker):
self.fail()
"""
@mock.patch('ceph_broker.log')
@mock.patch('hooks.ceph_broker.create_erasure_profile')
def test_create_erasure_profile(self, mock_create_erasure, mock_log):
req = json.dumps({'api-version': 1,
'ops': [{
'op': 'create-erasure-profile',
'name': 'foo',
'erasure-type': 'jerasure',
'failure-domain': 'rack',
'k': 3,
'm': 2,
}]})
rc = ceph_broker.process_requests(req)
mock_create_erasure.assert_called_with(service='admin',
profile_name='foo',
coding_chunks=2,
data_chunks=3,
locality=None,
failure_domain='rack',
erasure_plugin_name='jerasure')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@mock.patch('ceph_broker.log')
@mock.patch('hooks.ceph_broker.pool_exists')
@mock.patch('hooks.ceph_broker.ReplicatedPool.create')
def test_process_requests_create_replicated_pool(self,
mock_replicated_pool,
mock_pool_exists,
mock_log):
mock_pool_exists.return_value = False
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'create-pool',
'pool-type': 'replicated',
'name': 'foo',
'replicas': 3
}]})
rc = ceph_broker.process_requests(reqs)
mock_pool_exists.assert_called_with(service='admin', name='foo')
mock_replicated_pool.assert_called_with()
self.assertEqual(json.loads(rc), {'exit-code': 0})
@mock.patch('ceph_broker.log')
@mock.patch('hooks.ceph_broker.delete_pool')
def test_process_requests_delete_pool(self,
mock_delete_pool,
mock_log):
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'delete-pool',
'name': 'foo',
}]})
rc = ceph_broker.process_requests(reqs)
mock_delete_pool.assert_called_with(service='admin', name='foo')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@mock.patch('ceph_broker.log')
@mock.patch('hooks.ceph_broker.pool_exists')
@mock.patch('hooks.ceph_broker.ErasurePool.create')
@mock.patch('hooks.ceph_broker.erasure_profile_exists')
def test_process_requests_create_erasure_pool(self, mock_profile_exists,
mock_erasure_pool,
mock_pool_exists,
mock_log):
mock_pool_exists.return_value = False
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'create-pool',
'pool-type': 'erasure',
'name': 'foo',
'erasure-profile': 'default'
}]})
rc = ceph_broker.process_requests(reqs)
mock_profile_exists.assert_called_with(service='admin', name='default')
mock_pool_exists.assert_called_with(service='admin', name='foo')
mock_erasure_pool.assert_called_with()
self.assertEqual(json.loads(rc), {'exit-code': 0})
@mock.patch('ceph_broker.log')
@mock.patch('hooks.ceph_broker.pool_exists')
@mock.patch('hooks.ceph_broker.Pool.add_cache_tier')
def test_process_requests_create_cache_tier(self, mock_pool,
mock_pool_exists, mock_log):
mock_pool_exists.return_value = True
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'create-cache-tier',
'cold-pool': 'foo',
'hot-pool': 'foo-ssd',
'mode': 'writeback',
'erasure-profile': 'default'
}]})
rc = ceph_broker.process_requests(reqs)
mock_pool_exists.assert_any_call(service='admin', name='foo')
mock_pool_exists.assert_any_call(service='admin', name='foo-ssd')
mock_pool.assert_called_with(cache_pool='foo-ssd', mode='writeback')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@mock.patch('ceph_broker.log')
@mock.patch('hooks.ceph_broker.pool_exists')
@mock.patch('hooks.ceph_broker.Pool.remove_cache_tier')
def test_process_requests_remove_cache_tier(self, mock_pool,
mock_pool_exists, mock_log):
mock_pool_exists.return_value = True
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'remove-cache-tier',
'hot-pool': 'foo-ssd',
}]})
rc = ceph_broker.process_requests(reqs)
mock_pool_exists.assert_any_call(service='admin', name='foo-ssd')
mock_pool.assert_called_with(cache_pool='foo-ssd')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@mock.patch('ceph_broker.log')
@mock.patch('hooks.ceph_broker.snapshot_pool')
def test_snapshot_pool(self, mock_snapshot_pool, mock_log):
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'snapshot-pool',
'name': 'foo',
'snapshot-name': 'foo-snap1',
}]})
rc = ceph_broker.process_requests(reqs)
mock_snapshot_pool.return_value = 1
mock_snapshot_pool.assert_called_with(service='admin',
pool_name='foo',
snapshot_name='foo-snap1')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@mock.patch('ceph_broker.log')
@mock.patch('hooks.ceph_broker.rename_pool')
def test_rename_pool(self, mock_rename_pool, mock_log):
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'rename-pool',
'name': 'foo',
'new-name': 'foo2',
}]})
rc = ceph_broker.process_requests(reqs)
mock_rename_pool.assert_called_with(service='admin',
old_name='foo',
new_name='foo2')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@mock.patch('ceph_broker.log')
@mock.patch('hooks.ceph_broker.remove_pool_snapshot')
def test_remove_pool_snapshot(self, mock_snapshot_pool, mock_broker):
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'remove-pool-snapshot',
'name': 'foo',
'snapshot-name': 'foo-snap1',
}]})
rc = ceph_broker.process_requests(reqs)
mock_snapshot_pool.assert_called_with(service='admin',
pool_name='foo',
snapshot_name='foo-snap1')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@mock.patch('ceph_broker.log')
@mock.patch('hooks.ceph_broker.pool_set')
def test_set_pool_value(self, mock_set_pool, mock_broker):
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'set-pool-value',
'name': 'foo',
'key': 'size',
'value': 3,
}]})
rc = ceph_broker.process_requests(reqs)
mock_set_pool.assert_called_with(service='admin',
pool_name='foo',
key='size',
value=3)
self.assertEqual(json.loads(rc), {'exit-code': 0})
@mock.patch('ceph_broker.log')
def test_set_invalid_pool_value(self, mock_broker):
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'set-pool-value',
'name': 'foo',
'key': 'size',
'value': 'abc',
}]})
rc = ceph_broker.process_requests(reqs)
# self.assertRaises(AssertionError)
self.assertEqual(json.loads(rc)['exit-code'], 1)
'''
@mock.patch('ceph_broker.log')
def test_set_pool_max_bytes(self, mock_broker):
self.fail()
'''
if __name__ == '__main__':
unittest.main()

unit_tests/test_status.py

@ -31,7 +31,6 @@ ENOUGH_PEERS_COMPLETE = {
class ServiceStatusTestCase(test_utils.CharmTestCase):
def setUp(self):
super(ServiceStatusTestCase, self).setUp(hooks, TO_PATCH)
self.config.side_effect = self.test_config.get