Convert the charm to Python3

* Move charmhelpers to the root of the charm
* Sync charmhelpers to the latest version

Change-Id: Id0b838f0206635cf912d205f2fb6fda7b31d0dfe
Author: Alex Kavanagh
Date:   2019-02-01 12:17:30 +00:00
Parent: b53207d2fd
Commit: ddd26acc6d

145 changed files with 746 additions and 185 deletions
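The recurring mechanical change below: every action script drops the old
sys.path.append('hooks') in favour of a small bootstrap that resolves paths
relative to the script itself, so the charmhelpers tree now living at the
charm root is importable regardless of Juju's working directory. The pattern,
as it appears verbatim in the converted scripts:

    import os
    import sys

    # Resolve paths relative to this file rather than the current working
    # directory, which Juju does not guarantee.
    _path = os.path.dirname(os.path.realpath(__file__))
    _hooks = os.path.abspath(os.path.join(_path, '../hooks'))
    _root = os.path.abspath(os.path.join(_path, '..'))

    def _add_path(path):
        # Insert at position 1 so the script's own directory stays first.
        if path not in sys.path:
            sys.path.insert(1, path)

    _add_path(_hooks)
    _add_path(_root)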

@@ -1,4 +1,3 @@
- project:
templates:
- python-charm-jobs
- openstack-python35-jobs-nonvoting
- python35-charm-jobs

@@ -1 +0,0 @@
create-cache-tier.py

actions/create-cache-tier (new executable file, 41 lines)

@@ -0,0 +1,41 @@
#!/usr/bin/python
__author__ = 'chris'

from subprocess import CalledProcessError
import sys

sys.path.append('hooks')

from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists
from charmhelpers.core.hookenv import action_get, log, action_fail


def make_cache_tier():
    backer_pool = action_get("backer-pool")
    cache_pool = action_get("cache-pool")
    cache_mode = action_get("cache-mode")

    # Pre flight checks
    if not pool_exists('admin', backer_pool):
        log("Please create {} pool before calling create-cache-tier".format(
            backer_pool))
        action_fail("create-cache-tier failed. Backer pool {} must exist "
                    "before calling this".format(backer_pool))

    if not pool_exists('admin', cache_pool):
        log("Please create {} pool before calling create-cache-tier".format(
            cache_pool))
        action_fail("create-cache-tier failed. Cache pool {} must exist "
                    "before calling this".format(cache_pool))

    pool = Pool(service='admin', name=backer_pool)
    try:
        pool.add_cache_tier(cache_pool=cache_pool, mode=cache_mode)
    except CalledProcessError as err:
        log("Add cache tier failed with message: {}".format(
            err.message))
        action_fail("create-cache-tier failed. Add cache tier failed with "
                    "message: {}".format(err.message))


if __name__ == '__main__':
    make_cache_tier()
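Note the Python 2 leftovers this file still carries (the python shebang,
sys.path.append('hooks'), err.message): CalledProcessError has no .message
attribute on Python 3, which is why the hunks that follow switch to str(e). A
minimal illustration of the difference:

    from subprocess import CalledProcessError, check_call

    try:
        check_call(['false'])
    except CalledProcessError as e:
        print(str(e))      # works on both: "Command '['false']' returned ..."
        # print(e.message)   # AttributeError on Python 3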

@@ -1,9 +1,21 @@
#!/usr/bin/python
#!/usr/bin/env python3
__author__ = 'chris'
import os
from subprocess import CalledProcessError
import sys
sys.path.append('hooks')
_path = os.path.dirname(os.path.realpath(__file__))
_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
_root = os.path.abspath(os.path.join(_path, '..'))
def _add_path(path):
if path not in sys.path:
sys.path.insert(1, path)
_add_path(_hooks)
_add_path(_root)
from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists
from charmhelpers.core.hookenv import action_get, log, action_fail

@@ -1,8 +1,20 @@
#!/usr/bin/python
#!/usr/bin/env python3
import os
from subprocess import CalledProcessError
import sys
sys.path.append('hooks')
_path = os.path.dirname(os.path.realpath(__file__))
_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
_root = os.path.abspath(os.path.join(_path, '..'))
def _add_path(path):
if path not in sys.path:
sys.path.insert(1, path)
_add_path(_hooks)
_add_path(_root)
from charmhelpers.contrib.storage.linux.ceph import create_erasure_profile
from charmhelpers.core.hookenv import action_get, log, action_fail
@@ -29,7 +41,7 @@ def make_erasure_profile():
coding_chunks=m,
failure_domain=failure_domain)
except CalledProcessError as e:
log(e)
log(str(e))
action_fail("Create erasure profile failed with "
"message: {}".format(e.message))
elif plugin == "isa":
@@ -43,7 +55,7 @@ def make_erasure_profile():
coding_chunks=m,
failure_domain=failure_domain)
except CalledProcessError as e:
log(e)
log(str(e))
action_fail("Create erasure profile failed with "
"message: {}".format(e.message))
elif plugin == "local":
@@ -59,7 +71,7 @@ def make_erasure_profile():
locality=l,
failure_domain=failure_domain)
except CalledProcessError as e:
log(e)
log(str(e))
action_fail("Create erasure profile failed with "
"message: {}".format(e.message))
elif plugin == "shec":
@@ -75,7 +87,7 @@ def make_erasure_profile():
durability_estimator=c,
failure_domain=failure_domain)
except CalledProcessError as e:
log(e)
log(str(e))
action_fail("Create erasure profile failed with "
"message: {}".format(e.message))
else:

@@ -1,7 +1,19 @@
#!/usr/bin/python
#!/usr/bin/env python3
import os
import sys
sys.path.append('hooks')
_path = os.path.dirname(os.path.realpath(__file__))
_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
_root = os.path.abspath(os.path.join(_path, '..'))
def _add_path(path):
if path not in sys.path:
sys.path.insert(1, path)
_add_path(_hooks)
_add_path(_root)
from subprocess import CalledProcessError
from charmhelpers.core.hookenv import action_get, log, action_fail
from charmhelpers.contrib.storage.linux.ceph import ErasurePool, ReplicatedPool
@@ -31,7 +43,7 @@ def create_pool():
"is allowed".format(pool_type))
except CalledProcessError as e:
action_fail("Pool creation failed because of a failed process. "
"Ret Code: {} Message: {}".format(e.returncode, e.message))
"Ret Code: {} Message: {}".format(e.returncode, str(e)))
if __name__ == '__main__':

@@ -1,10 +1,21 @@
#!/usr/bin/python
#!/usr/bin/env python3
from subprocess import CalledProcessError
__author__ = 'chris'
import os
import sys
sys.path.append('hooks')
_path = os.path.dirname(os.path.realpath(__file__))
_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
_root = os.path.abspath(os.path.join(_path, '..'))
def _add_path(path):
if path not in sys.path:
sys.path.insert(1, path)
_add_path(_hooks)
_add_path(_root)
from charmhelpers.contrib.storage.linux.ceph import remove_erasure_profile
from charmhelpers.core.hookenv import action_get, log, action_fail

@@ -1,7 +1,18 @@
#!/usr/bin/python
#!/usr/bin/env python3
import os
import sys
sys.path.append('hooks')
_path = os.path.dirname(os.path.realpath(__file__))
_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
_root = os.path.abspath(os.path.join(_path, '..'))
def _add_path(path):
if path not in sys.path:
sys.path.insert(1, path)
_add_path(_hooks)
_add_path(_root)
import rados
from ceph_ops import connect
@@ -20,8 +31,8 @@ def remove_pool():
rados.NoData,
rados.NoSpace,
rados.PermissionError) as e:
log(e)
action_fail(e)
log(str(e))
action_fail(str(e))
if __name__ == '__main__':
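The remove-pool action goes through the rados bindings rather than the ceph
CLI; wrapping the library's exceptions in str() keeps log() and action_fail()
happy on Python 3, since the exception objects themselves are not strings. A
sketch, assuming the python3-rados package and a reachable cluster (pool name
hypothetical):

    import rados

    try:
        cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
        cluster.connect()
        cluster.delete_pool('doomed-pool')
    except (rados.Error, rados.PermissionError) as e:
        print(str(e))  # convert before handing to log()/action_fail()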

@@ -1,8 +1,19 @@
#!/usr/bin/python
#!/usr/bin/env python3
__author__ = 'chris'
import os
import sys
sys.path.append('hooks')
_path = os.path.dirname(os.path.realpath(__file__))
_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
_root = os.path.abspath(os.path.join(_path, '..'))
def _add_path(path):
if path not in sys.path:
sys.path.insert(1, path)
_add_path(_hooks)
_add_path(_root)
from charmhelpers.contrib.storage.linux.ceph import get_erasure_profile
from charmhelpers.core.hookenv import action_get, action_set

@@ -1,9 +1,20 @@
#!/usr/bin/python
#!/usr/bin/env python3
__author__ = 'chris'
import sys
import os
from subprocess import check_output, CalledProcessError
import sys
sys.path.append('hooks')
_path = os.path.dirname(os.path.realpath(__file__))
_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
_root = os.path.abspath(os.path.join(_path, '..'))
def _add_path(path):
if path not in sys.path:
sys.path.insert(1, path)
_add_path(_hooks)
_add_path(_root)
from charmhelpers.core.hookenv import action_get, log, action_set, action_fail
@@ -17,6 +28,6 @@ if __name__ == '__main__':
'ls']).decode('UTF-8')
action_set({'message': out})
except CalledProcessError as e:
log(e)
log(str(e))
action_fail("Listing erasure profiles failed with error: {}".format(
e.message))
str(e)))

@@ -1,9 +1,20 @@
#!/usr/bin/python
#!/usr/bin/env python3
__author__ = 'chris'
import sys
import os
from subprocess import check_output, CalledProcessError
import sys
sys.path.append('hooks')
_path = os.path.dirname(os.path.realpath(__file__))
_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
_root = os.path.abspath(os.path.join(_path, '..'))
def _add_path(path):
if path not in sys.path:
sys.path.insert(1, path)
_add_path(_hooks)
_add_path(_root)
from charmhelpers.core.hookenv import log, action_set, action_fail
@@ -13,5 +24,5 @@ if __name__ == '__main__':
'osd', 'lspools']).decode('UTF-8')
action_set({'message': out})
except CalledProcessError as e:
log(e)
action_fail("List pools failed with error: {}".format(e.message))
log(str(e))
action_fail("List pools failed with error: {}".format(str(e)))

@@ -1,9 +1,20 @@
#!/usr/bin/python
#!/usr/bin/env python3
__author__ = 'chris'
import sys
import os
from subprocess import check_output, CalledProcessError
import sys
sys.path.append('hooks')
_path = os.path.dirname(os.path.realpath(__file__))
_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
_root = os.path.abspath(os.path.join(_path, '..'))
def _add_path(path):
if path not in sys.path:
sys.path.insert(1, path)
_add_path(_hooks)
_add_path(_root)
from charmhelpers.core.hookenv import log, action_set, action_get, action_fail
@@ -15,5 +26,5 @@ if __name__ == '__main__':
'osd', 'pool', 'get', name, key]).decode('UTF-8')
action_set({'message': out})
except CalledProcessError as e:
log(e)
action_fail("Pool get failed with message: {}".format(e.message))
log(str(e))
action_fail("Pool get failed with message: {}".format(str(e)))

@@ -1,8 +1,19 @@
#!/usr/bin/python
#!/usr/bin/env python3
import os
from subprocess import CalledProcessError
import sys
sys.path.append('hooks')
_path = os.path.dirname(os.path.realpath(__file__))
_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
_root = os.path.abspath(os.path.join(_path, '..'))
def _add_path(path):
if path not in sys.path:
sys.path.insert(1, path)
_add_path(_hooks)
_add_path(_root)
from charmhelpers.core.hookenv import action_get, log, action_fail
from ceph_broker import handle_set_pool_value
@@ -18,6 +29,6 @@ if __name__ == '__main__':
try:
handle_set_pool_value(service='admin', request=request)
except CalledProcessError as e:
log(e.message)
log(str(e))
action_fail("Setting pool key: {} and value: {} failed with "
"message: {}".format(key, value, e.message))
"message: {}".format(key, value, str(e)))

@@ -1,7 +1,19 @@
#!/usr/bin/python
#!/usr/bin/env python3
import os
import sys
sys.path.append('hooks')
_path = os.path.dirname(os.path.realpath(__file__))
_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
_root = os.path.abspath(os.path.join(_path, '..'))
def _add_path(path):
if path not in sys.path:
sys.path.insert(1, path)
_add_path(_hooks)
_add_path(_root)
from subprocess import check_output, CalledProcessError
from charmhelpers.core.hookenv import log, action_set, action_fail
@@ -11,5 +23,5 @@ if __name__ == '__main__':
'df']).decode('UTF-8')
action_set({'message': out})
except CalledProcessError as e:
log(e)
action_fail("ceph df failed with message: {}".format(e.message))
log(str(e))
action_fail("ceph df failed with message: {}".format(str(e)))

@@ -1 +0,0 @@
remove-cache-tier.py

actions/remove-cache-tier (new executable file, 41 lines)

@@ -0,0 +1,41 @@
#!/usr/bin/python
from subprocess import CalledProcessError
import sys

sys.path.append('hooks')

from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists
from charmhelpers.core.hookenv import action_get, log, action_fail

__author__ = 'chris'


def delete_cache_tier():
    backer_pool = action_get("backer-pool")
    cache_pool = action_get("cache-pool")

    # Pre flight checks
    if not pool_exists('admin', backer_pool):
        log("Backer pool {} must exist before calling this".format(
            backer_pool))
        action_fail("remove-cache-tier failed. Backer pool {} must exist "
                    "before calling this".format(backer_pool))

    if not pool_exists('admin', cache_pool):
        log("Cache pool {} must exist before calling this".format(
            cache_pool))
        action_fail("remove-cache-tier failed. Cache pool {} must exist "
                    "before calling this".format(cache_pool))

    pool = Pool(service='admin', name=backer_pool)
    try:
        pool.remove_cache_tier(cache_pool=cache_pool)
    except CalledProcessError as err:
        log("Removing the cache tier failed with message: {}".format(
            err.message))
        action_fail("remove-cache-tier failed. Removing the cache tier failed "
                    "with message: {}".format(err.message))


if __name__ == '__main__':
    delete_cache_tier()

@@ -1,8 +1,19 @@
#!/usr/bin/python
#!/usr/bin/env python3
import os
from subprocess import CalledProcessError
import sys
sys.path.append('hooks')
_path = os.path.dirname(os.path.realpath(__file__))
_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
_root = os.path.abspath(os.path.join(_path, '..'))
def _add_path(path):
if path not in sys.path:
sys.path.insert(1, path)
_add_path(_hooks)
_add_path(_root)
from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists
from charmhelpers.core.hookenv import action_get, log, action_fail
@@ -32,9 +43,9 @@ def delete_cache_tier():
pool.remove_cache_tier(cache_pool=cache_pool)
except CalledProcessError as err:
log("Removing the cache tier failed with message: {}".format(
err.message))
str(err)))
action_fail("remove-cache-tier failed. Removing the cache tier failed "
"with message: {}".format(err.message))
"with message: {}".format(str(err)))
if __name__ == '__main__':

@@ -1,7 +1,19 @@
#!/usr/bin/python
#!/usr/bin/env python3
import os
import sys
sys.path.append('hooks')
_path = os.path.dirname(os.path.realpath(__file__))
_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
_root = os.path.abspath(os.path.join(_path, '..'))
def _add_path(path):
if path not in sys.path:
sys.path.insert(1, path)
_add_path(_hooks)
_add_path(_root)
from subprocess import CalledProcessError
from charmhelpers.core.hookenv import action_get, log, action_fail
from charmhelpers.contrib.storage.linux.ceph import remove_pool_snapshot
@@ -14,6 +26,6 @@ if __name__ == '__main__':
pool_name=name,
snapshot_name=snapname)
except CalledProcessError as e:
log(e)
log(str(e))
action_fail("Remove pool snapshot failed with message: {}".format(
e.message))
str(e)))

@@ -1,7 +1,19 @@
#!/usr/bin/python
#!/usr/bin/env python3
import os
import sys
sys.path.append('hooks')
_path = os.path.dirname(os.path.realpath(__file__))
_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
_root = os.path.abspath(os.path.join(_path, '..'))
def _add_path(path):
if path not in sys.path:
sys.path.insert(1, path)
_add_path(_hooks)
_add_path(_root)
from subprocess import CalledProcessError
from charmhelpers.core.hookenv import action_get, log, action_fail
from charmhelpers.contrib.storage.linux.ceph import rename_pool
@@ -12,5 +24,5 @@ if __name__ == '__main__':
try:
rename_pool(service='admin', old_name=name, new_name=new_name)
except CalledProcessError as e:
log(e)
action_fail("Renaming pool failed with message: {}".format(e.message))
log(str(e))
action_fail("Renaming pool failed with message: {}".format(str(e)))

@@ -1,7 +1,19 @@
#!/usr/bin/python
#!/usr/bin/env python3
import os
import sys
sys.path.append('hooks')
_path = os.path.dirname(os.path.realpath(__file__))
_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
_root = os.path.abspath(os.path.join(_path, '..'))
def _add_path(path):
if path not in sys.path:
sys.path.insert(1, path)
_add_path(_hooks)
_add_path(_root)
from subprocess import CalledProcessError
from charmhelpers.core.hookenv import action_get, log, action_fail
from charmhelpers.contrib.storage.linux.ceph import set_pool_quota
@@ -12,5 +24,5 @@ if __name__ == '__main__':
try:
set_pool_quota(service='admin', pool_name=name, max_bytes=max_bytes)
except CalledProcessError as e:
log(e)
action_fail("Set pool quota failed with message: {}".format(e.message))
log(str(e))
action_fail("Set pool quota failed with message: {}".format(str(e)))

@@ -1,7 +1,19 @@
#!/usr/bin/python
#!/usr/bin/env python3
import os
import sys
sys.path.append('hooks')
_path = os.path.dirname(os.path.realpath(__file__))
_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
_root = os.path.abspath(os.path.join(_path, '..'))
def _add_path(path):
if path not in sys.path:
sys.path.insert(1, path)
_add_path(_hooks)
_add_path(_root)
from subprocess import CalledProcessError
from charmhelpers.core.hookenv import action_get, log, action_fail
from charmhelpers.contrib.storage.linux.ceph import snapshot_pool
@@ -14,5 +26,5 @@ if __name__ == '__main__':
pool_name=name,
snapshot_name=snapname)
except CalledProcessError as e:
log(e)
action_fail("Snapshot pool failed with message: {}".format(e.message))
log(str(e))
action_fail("Snapshot pool failed with message: {}".format(str(e)))

@@ -1,5 +1,5 @@
repo: https://github.com/juju/charm-helpers
destination: hooks/charmhelpers
destination: charmhelpers
include:
- core
- cli

@@ -19,9 +19,16 @@ from charmhelpers.core import unitdata
@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
def unitdata_cmd(subparser):
nested = subparser.add_subparsers()
get_cmd = nested.add_parser('get', help='Retrieve data')
get_cmd.add_argument('key', help='Key to retrieve the value of')
get_cmd.set_defaults(action='get', value=None)
getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data')
getrange_cmd.add_argument('key', metavar='prefix',
help='Prefix of the keys to retrieve')
getrange_cmd.set_defaults(action='getrange', value=None)
set_cmd = nested.add_parser('set', help='Store data')
set_cmd.add_argument('key', help='Key to set')
set_cmd.add_argument('value', help='Value to store')
@@ -30,6 +37,8 @@ def unitdata_cmd(subparser):
def _unitdata_cmd(action, key, value):
if action == 'get':
return unitdata.kv().get(key)
elif action == 'getrange':
return unitdata.kv().getrange(key)
elif action == 'set':
unitdata.kv().set(key, value)
unitdata.kv().flush()
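The new getrange subcommand is a thin wrapper over unitdata.kv().getrange(),
which returns every key sharing a given prefix. A quick sketch of the
underlying call (keys hypothetical):

    from charmhelpers.core import unitdata

    kv = unitdata.kv()
    kv.set('pool.a', 1)
    kv.set('pool.b', 2)
    kv.flush()
    print(kv.getrange('pool.'))  # -> {'pool.a': 1, 'pool.b': 2}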

@@ -194,7 +194,7 @@ SWIFT_CODENAMES = OrderedDict([
('rocky',
['2.18.0', '2.19.0']),
('stein',
['2.19.0']),
['2.20.0']),
])
# >= Liberty version->codename mapping
@@ -656,7 +656,7 @@ def openstack_upgrade_available(package):
else:
avail_vers = get_os_version_install_source(src)
apt.init()
return apt.version_compare(avail_vers, cur_vers) == 1
return apt.version_compare(avail_vers, cur_vers) >= 1
def ensure_block_device(block_device):
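The relaxed comparison matters because python-apt only guarantees the sign of
version_compare's result (positive, zero or negative), not that it is exactly
1, so == 1 could miss a genuine upgrade. A minimal illustration, assuming
python-apt is installed:

    import apt_pkg as apt

    apt.init()
    # Positive means the first version is newer; the magnitude is unspecified.
    if apt.version_compare('2:17.0.0', '2:16.0.4') >= 1:
        print('upgrade available')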

@@ -59,6 +59,7 @@ from charmhelpers.core.host import (
service_stop,
service_running,
umount,
cmp_pkgrevno,
)
from charmhelpers.fetch import (
apt_install,
@@ -178,7 +179,6 @@ class Pool(object):
"""
# read-only is easy, writeback is much harder
mode = get_cache_mode(self.service, cache_pool)
version = ceph_version()
if mode == 'readonly':
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
@@ -186,7 +186,7 @@
elif mode == 'writeback':
pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
'cache-mode', cache_pool, 'forward']
if version >= '10.1':
if cmp_pkgrevno('ceph-common', '10.1') >= 0:
# Jewel added a mandatory flag
pool_forward_cmd.append('--yes-i-really-mean-it')
@@ -196,7 +196,8 @@ class Pool(object):
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT):
def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT,
device_class=None):
"""Return the number of placement groups to use when creating the pool.
Returns the number of placement groups which should be specified when
@@ -229,6 +230,9 @@ class Pool(object):
increased. NOTE: the default is primarily to handle the scenario
where related charms requiring pools has not been upgraded to
include an update to indicate their relative usage of the pools.
:param device_class: str. class of storage to use for basis of pgs
calculation; ceph supports nvme, ssd and hdd by default based
on presence of devices of each type in the deployment.
:return: int. The number of pgs to use.
"""
@@ -243,17 +247,20 @@ class Pool(object):
# If the expected-osd-count is specified, then use the max between
# the expected-osd-count and the actual osd_count
osd_list = get_osds(self.service)
osd_list = get_osds(self.service, device_class)
expected = config('expected-osd-count') or 0
if osd_list:
osd_count = max(expected, len(osd_list))
if device_class:
osd_count = len(osd_list)
else:
osd_count = max(expected, len(osd_list))
# Log a message to provide some insight if the calculations claim
# to be off because someone is setting the expected count and
# there are more OSDs in reality. Try to make a proper guess
# based upon the cluster itself.
if expected and osd_count != expected:
if not device_class and expected and osd_count != expected:
log("Found more OSDs than provided expected count. "
"Using the actual count instead", INFO)
elif expected:
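When a device_class is supplied, only OSDs advertising that CRUSH class count
toward the placement-group calculation and the expected-osd-count fallback is
skipped. A hedged sketch of the call (pool and class names hypothetical, and
the ReplicatedPool constructor arguments are an assumption):

    from charmhelpers.contrib.storage.linux.ceph import ReplicatedPool

    pool = ReplicatedPool(service='admin', name='fast-pool', replicas=3)
    # Base the PG count on the 20% of data expected to land on SSD OSDs only.
    pg_num = pool.get_pgs(pool_size=3, percent_data=20, device_class='ssd')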
@@ -575,21 +582,24 @@ def remove_pool_snapshot(service, pool_name, snapshot_name):
raise
# max_bytes should be an int or long
def set_pool_quota(service, pool_name, max_bytes):
def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
"""
:param service: six.string_types. The Ceph user name to run the command under
:param pool_name: six.string_types
:param max_bytes: int or long
:return: None. Can raise CalledProcessError
:param service: The Ceph user name to run the command under
:type service: str
:param pool_name: Name of pool
:type pool_name: str
:param max_bytes: Maximum bytes quota to apply
:type max_bytes: int
:param max_objects: Maximum objects quota to apply
:type max_objects: int
:raises: subprocess.CalledProcessError
"""
# Set a byte quota on a RADOS pool in ceph.
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
'max_bytes', str(max_bytes)]
try:
check_call(cmd)
except CalledProcessError:
raise
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name]
if max_bytes:
cmd = cmd + ['max_bytes', str(max_bytes)]
if max_objects:
cmd = cmd + ['max_objects', str(max_objects)]
check_call(cmd)
def remove_pool_quota(service, pool_name):
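Because the reworked set_pool_quota assembles the command incrementally, byte
and object quotas can now be applied independently or together. Usage sketch
(service and pool names hypothetical):

    from charmhelpers.contrib.storage.linux.ceph import set_pool_quota

    # Either keyword may be omitted; only the quotas given are set.
    set_pool_quota(service='admin', pool_name='mypool',
                   max_bytes=1024 ** 3,   # 1 GiB
                   max_objects=100000)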
@@ -626,7 +636,8 @@ def remove_erasure_profile(service, profile_name):
def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
failure_domain='host',
data_chunks=2, coding_chunks=1,
locality=None, durability_estimator=None):
locality=None, durability_estimator=None,
device_class=None):
"""
Create a new erasure code profile if one does not already exist for it. Updates
the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
@@ -640,10 +651,9 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
:param coding_chunks: int
:param locality: int
:param durability_estimator: int
:param device_class: six.string_types
:return: None. Can raise CalledProcessError
"""
version = ceph_version()
# Ensure this failure_domain is allowed by Ceph
validator(failure_domain, six.string_types,
['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
@@ -654,12 +664,20 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
if locality is not None and durability_estimator is not None:
raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
# failure_domain changed in luminous
if version and version >= '12.0.0':
if luminous_or_later:
cmd.append('crush-failure-domain=' + failure_domain)
else:
cmd.append('ruleset-failure-domain=' + failure_domain)
# device class new in luminous
if luminous_or_later and device_class:
cmd.append('crush-device-class={}'.format(device_class))
else:
log('Skipping device class configuration (ceph < 12.0.0)',
level=DEBUG)
# Add plugin specific information
if locality is not None:
# For local erasure codes
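Replacing string comparisons of ceph_version() with cmp_pkgrevno is more than
style: string comparison is lexicographic, so '9.2.0' sorts after '12.0.0'.
cmp_pkgrevno compares against the installed package using dpkg semantics and
returns 1, 0 or -1. An illustration:

    # Lexicographic comparison gets version ordering wrong:
    assert ('9.2.0' > '12.0.0') is True   # '9' > '1' character-wise

    # dpkg-aware comparison against the installed ceph-common package:
    from charmhelpers.core.host import cmp_pkgrevno
    luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0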
@@ -744,20 +762,26 @@ def pool_exists(service, name):
return name in out.split()
def get_osds(service):
def get_osds(service, device_class=None):
"""Return a list of all Ceph Object Storage Daemons currently in the
cluster.
cluster (optionally filtered by storage device class).
:param device_class: Class of storage device for OSD's
:type device_class: str
"""
version = ceph_version()
if version and version >= '0.56':
luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
if luminous_or_later and device_class:
out = check_output(['ceph', '--id', service,
'osd', 'crush', 'class',
'ls-osd', device_class,
'--format=json'])
else:
out = check_output(['ceph', '--id', service,
'osd', 'ls',
'--format=json'])
if six.PY3:
out = out.decode('UTF-8')
return json.loads(out)
return None
if six.PY3:
out = out.decode('UTF-8')
return json.loads(out)
def install():
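With the Luminous guard in place, get_osds can scope the listing to a single
CRUSH device class via `ceph osd crush class ls-osd`. Sketch (service name
hypothetical):

    from charmhelpers.contrib.storage.linux.ceph import get_osds

    all_osds = get_osds('admin')                       # every OSD id
    ssd_osds = get_osds('admin', device_class='ssd')   # 'ssd'-class OSDs only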
@@ -811,7 +835,7 @@ def set_app_name_for_pool(client, pool, name):
:raises: CalledProcessError if ceph call fails
"""
if ceph_version() >= '12.0.0':
if cmp_pkgrevno('ceph-common', '12.0.0') >= 0:
cmd = ['ceph', '--id', client, 'osd', 'pool',
'application', 'enable', pool, name]
check_call(cmd)
@@ -1091,22 +1115,6 @@ def ensure_ceph_keyring(service, user=None, group=None,
return True
def ceph_version():
"""Retrieve the local version of ceph."""
if os.path.exists('/usr/bin/ceph'):
cmd = ['ceph', '-v']
output = check_output(cmd)
if six.PY3:
output = output.decode('UTF-8')
output = output.split()
if len(output) > 3:
return output[2]
else:
return None
else:
return None
class CephBrokerRq(object):
"""Ceph broker request.
@@ -1147,14 +1155,47 @@ class CephBrokerRq(object):
'object-prefix-permissions': object_prefix_permissions})
def add_op_create_pool(self, name, replica_count=3, pg_num=None,
weight=None, group=None, namespace=None):
"""Adds an operation to create a pool.
weight=None, group=None, namespace=None,
app_name=None, max_bytes=None, max_objects=None):
"""DEPRECATED: Use ``add_op_create_replicated_pool()`` or
``add_op_create_erasure_pool()`` instead.
"""
return self.add_op_create_replicated_pool(
name, replica_count=replica_count, pg_num=pg_num, weight=weight,
group=group, namespace=namespace, app_name=app_name,
max_bytes=max_bytes, max_objects=max_objects)
@param pg_num setting: optional setting. If not provided, this value
will be calculated by the broker based on how many OSDs are in the
cluster at the time of creation. Note that, if provided, this value
will be capped at the current available maximum.
@param weight: the percentage of data the pool makes up
def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None,
weight=None, group=None, namespace=None,
app_name=None, max_bytes=None,
max_objects=None):
"""Adds an operation to create a replicated pool.
:param name: Name of pool to create
:type name: str
:param replica_count: Number of copies Ceph should keep of your data.
:type replica_count: int
:param pg_num: Request specific number of Placement Groups to create
for pool.
:type pg_num: int
:param weight: The percentage of data that is expected to be contained
in the pool from the total available space on the OSDs.
Used to calculate number of Placement Groups to create
for pool.
:type weight: float
:param group: Group to add pool to
:type group: str
:param namespace: Group namespace
:type namespace: str
:param app_name: (Optional) Tag pool with application name. Note that
there is certain protocols emerging upstream with
regard to meaningful application names to use.
Examples are ``rbd`` and ``rgw``.
:type app_name: str
:param max_bytes: Maximum bytes quota to apply
:type max_bytes: int
:param max_objects: Maximum objects quota to apply
:type max_objects: int
"""
if pg_num and weight:
raise ValueError('pg_num and weight are mutually exclusive')
@@ -1162,7 +1203,41 @@ class CephBrokerRq(object):
self.ops.append({'op': 'create-pool', 'name': name,
'replicas': replica_count, 'pg_num': pg_num,
'weight': weight, 'group': group,
'group-namespace': namespace})
'group-namespace': namespace, 'app-name': app_name,
'max-bytes': max_bytes, 'max-objects': max_objects})
def add_op_create_erasure_pool(self, name, erasure_profile=None,
weight=None, group=None, app_name=None,
max_bytes=None, max_objects=None):
"""Adds an operation to create a erasure coded pool.
:param name: Name of pool to create
:type name: str
:param erasure_profile: Name of erasure code profile to use. If not
set the ceph-mon unit handling the broker
request will set its default value.
:type erasure_profile: str
:param weight: The percentage of data that is expected to be contained
in the pool from the total available space on the OSDs.
:type weight: float
:param group: Group to add pool to
:type group: str
:param app_name: (Optional) Tag pool with application name. Note that
there is certain protocols emerging upstream with
regard to meaningful application names to use.
Examples are ``rbd`` and ``rgw``.
:type app_name: str
:param max_bytes: Maximum bytes quota to apply
:type max_bytes: int
:param max_objects: Maximum objects quota to apply
:type max_objects: int
"""
self.ops.append({'op': 'create-pool', 'name': name,
'pool-type': 'erasure',
'erasure-profile': erasure_profile,
'weight': weight,
'group': group, 'app-name': app_name,
'max-bytes': max_bytes, 'max-objects': max_objects})
def set_ops(self, ops):
"""Set request ops to provided value.

Some files were not shown because too many files have changed in this diff.