author     Zuul <zuul@review.openstack.org>           2019-03-19 07:55:02 +0000
committer  Gerrit Code Review <review@openstack.org>  2019-03-19 07:55:02 +0000
commit     4a3a981e9d72888e494e27bc88652ff9f36aba64 (patch)
tree       fc918db3240c17672ead1d46ec2446ae134ecaf0
parent     475f17791c8cb8866876925fd610e9745f71f510 (diff)
parent     8655dbd9e34a31b11b276e34118e190f6bf8467e (diff)

Merge "udevadm settle before processing osd-devices" (HEAD, master)

 hooks/ceph_hooks.py (-rwxr-xr-x) |   1
 lib/ceph/broker.py  (-rw-r--r--) |  17
 lib/ceph/utils.py   (-rw-r--r--) | 180
 3 files changed, 181 insertions(+), 17 deletions(-)
diff --git a/hooks/ceph_hooks.py b/hooks/ceph_hooks.py
index b9135fe..60246bf 100755
--- a/hooks/ceph_hooks.py
+++ b/hooks/ceph_hooks.py
@@ -521,6 +521,7 @@ def prepare_disks_and_activate():
         log('ceph bootstrapped, rescanning disks')
         emit_cephconf()
         bluestore = use_bluestore()
+        ceph.udevadm_settle()
         for dev in get_devices():
             ceph.osdize(dev, config('osd-format'),
                         osd_journal,
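The settle call matters because the udev triggers fired during rescan are asynchronous. A minimal sketch of what ceph.udevadm_settle() does, assuming only the standard udevadm CLI:

    import subprocess

    def udevadm_settle():
        # Block until the udev event queue is empty, so device nodes
        # created by an earlier 'udevadm trigger' exist before osdize()
        # walks the configured osd-devices.
        subprocess.call(['udevadm', 'settle'])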
diff --git a/lib/ceph/broker.py b/lib/ceph/broker.py
index 3e857d2..3226f4c 100644
--- a/lib/ceph/broker.py
+++ b/lib/ceph/broker.py
@@ -85,6 +85,7 @@ POOL_KEYS = {
     "compression_mode": [str, ["none", "passive", "aggressive", "force"]],
     "compression_algorithm": [str, ["lz4", "snappy", "zlib", "zstd"]],
     "compression_required_ratio": [float, [0.0, 1.0]],
+    "crush_rule": [str],
 }
 
 CEPH_BUCKET_TYPES = [
@@ -368,7 +369,8 @@ def handle_erasure_pool(request, service):
     """
     pool_name = request.get('name')
     erasure_profile = request.get('erasure-profile')
-    quota = request.get('max-bytes')
+    max_bytes = request.get('max-bytes')
+    max_objects = request.get('max-objects')
     weight = request.get('weight')
     group_name = request.get('group')
 
@@ -408,8 +410,9 @@ def handle_erasure_pool(request, service):
         pool.create()
 
     # Set a quota if requested
-    if quota is not None:
-        set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota)
+    if max_bytes or max_objects:
+        set_pool_quota(service=service, pool_name=pool_name,
+                       max_bytes=max_bytes, max_objects=max_objects)
 
 
 def handle_replicated_pool(request, service):
@@ -421,7 +424,8 @@ def handle_replicated_pool(request, service):
     """
     pool_name = request.get('name')
    replicas = request.get('replicas')
-    quota = request.get('max-bytes')
+    max_bytes = request.get('max-bytes')
+    max_objects = request.get('max-objects')
     weight = request.get('weight')
     group_name = request.get('group')
 
@@ -468,8 +472,9 @@ def handle_replicated_pool(request, service):
                 level=DEBUG)
 
     # Set a quota if requested
-    if quota is not None:
-        set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota)
+    if max_bytes or max_objects:
+        set_pool_quota(service=service, pool_name=pool_name,
+                       max_bytes=max_bytes, max_objects=max_objects)
 
 
 def handle_create_cache_tier(request, service):
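For context, a hedged sketch of the broker request shape these handlers now accept; the 'max-bytes'/'max-objects' field names come from the diff, while the surrounding op envelope follows the broker's existing convention and is otherwise illustrative:

    request = {
        'op': 'create-pool',
        'name': 'cinder-ceph',          # illustrative pool name
        'replicas': 3,
        'max-bytes': 10 * 1024 ** 3,    # byte quota (10 GiB)
        'max-objects': 1000000,         # new: object-count quota
    }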
diff --git a/lib/ceph/utils.py b/lib/ceph/utils.py
index dac98c9..35f351f 100644
--- a/lib/ceph/utils.py
+++ b/lib/ceph/utils.py
@@ -935,6 +935,11 @@ def start_osds(devices):
             subprocess.check_call(['ceph-disk', 'activate', dev_or_path])
 
 
+def udevadm_settle():
+    cmd = ['udevadm', 'settle']
+    subprocess.call(cmd)
+
+
 def rescan_osd_devices():
     cmd = [
         'udevadm', 'trigger',
@@ -943,8 +948,7 @@ def rescan_osd_devices():
 
     subprocess.call(cmd)
 
-    cmd = ['udevadm', 'settle']
-    subprocess.call(cmd)
+    udevadm_settle()
 
 
 _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring"
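rescan_osd_devices() now delegates to the shared helper. If blocking a hook indefinitely is a concern, udevadm also accepts an explicit timeout; a variant, not what the charm does:

    import subprocess

    # Wait at most 30 seconds for the udev queue to drain.
    subprocess.call(['udevadm', 'settle', '--timeout=30'])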
@@ -1072,8 +1076,8 @@ _upgrade_caps = {
 }
 
 
-def get_radosgw_key(pool_list=None):
-    return get_named_key(name='radosgw.gateway',
+def get_radosgw_key(pool_list=None, name=None):
+    return get_named_key(name=name or 'radosgw.gateway',
                          caps=_radosgw_caps,
                          pool_list=pool_list)
 
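Usage sketch for the extended signature; the per-gateway key name is hypothetical:

    key = get_radosgw_key()                          # legacy shared 'radosgw.gateway' key
    key = get_radosgw_key(name='rgw.juju-gateway-0') # hypothetical per-gateway name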
@@ -1128,6 +1132,15 @@ osd_upgrade_caps = collections.OrderedDict([
     ])
 ])
 
+rbd_mirror_caps = collections.OrderedDict([
+    ('mon', ['profile rbd']),
+    ('osd', ['profile rbd']),
+])
+
+
+def get_rbd_mirror_key(name):
+    return get_named_key(name=name, caps=rbd_mirror_caps)
+
 
 def create_named_keyring(entity, name, caps=None):
     caps = caps or _default_caps
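A usage sketch: rbd-mirror daemons only need the 'profile rbd' capability on mon and osd, so the new helper mints a narrowly scoped key per mirror entity (entity name hypothetical):

    key = get_rbd_mirror_key('rbd-mirror.juju-unit-0')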
@@ -1819,6 +1832,13 @@ def _initialize_disk(dev, dev_uuid, encrypt=False,
             '--uuid', dev_uuid,
             dev,
         ])
+        subprocess.check_call([
+            'dd',
+            'if=/dev/zero',
+            'of={}'.format(dm_crypt),
+            'bs=512',
+            'count=1',
+        ])
 
     if use_vaultlocker:
         return dm_crypt
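The added dd call zeroes the first 512 bytes of the freshly mapped dm-crypt device, clearing any stale disk label from a previous life of the block device before ceph-disk uses it. A pure-Python equivalent, assuming dm_crypt holds the /dev/mapper path:

    # Zero the first sector so ceph-disk sees a blank device.
    with open(dm_crypt, 'r+b') as dev:
        dev.write(b'\x00' * 512)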
@@ -1899,6 +1919,7 @@ def osdize_dir(path, encrypt=False, bluestore=False):
         return
 
     mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755)
+    chownr('/var/lib/ceph', ceph_user(), ceph_user())
     cmd = [
         'sudo', '-u', ceph_user(),
         'ceph-disk',
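chownr here is the recursive chown helper from charmhelpers.core.host (its import is assumed to exist elsewhere in the module); it makes sure the whole /var/lib/ceph tree belongs to the ceph user before ceph-disk prepares a directory-backed OSD:

    from charmhelpers.core.host import chownr

    # Recursively chown the tree; owner and group are the ceph daemon user.
    chownr('/var/lib/ceph', owner='ceph', group='ceph')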
@@ -2499,18 +2520,21 @@ def update_owner(path, recurse_dirs=True):
             secs=elapsed_time.total_seconds(), path=path), DEBUG)
 
 
-def list_pools(service):
+def list_pools(client='admin'):
     """This will list the current pools that Ceph has
 
-    :param service: String service id to run under
-    :returns: list. Returns a list of the ceph pools.
-    :raises: CalledProcessError if the subprocess fails to run.
+    :param client: (Optional) client id for ceph key to use
+                   Defaults to ``admin``
+    :type client: str
+    :returns: Returns a list of available pools.
+    :rtype: list
+    :raises: subprocess.CalledProcessError if the subprocess fails to run.
     """
     try:
         pool_list = []
-        pools = str(subprocess
-                    .check_output(['rados', '--id', service, 'lspools'])
-                    .decode('UTF-8'))
+        pools = subprocess.check_output(['rados', '--id', client, 'lspools'],
+                                        universal_newlines=True,
+                                        stderr=subprocess.STDOUT)
         for pool in pools.splitlines():
             pool_list.append(pool)
         return pool_list
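Callers switch from a positional service id to a keyword client id, e.g.:

    pools = list_pools()                  # uses the 'admin' key
    pools = list_pools(client='glance')   # hypothetical client id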
@@ -2519,6 +2543,140 @@ def list_pools(service):
         raise
 
 
+def get_pool_param(pool, param, client='admin'):
+    """Get parameter from pool.
+
+    :param pool: Name of pool to get variable from
+    :type pool: str
+    :param param: Name of variable to get
+    :type param: str
+    :param client: (Optional) client id for ceph key to use
+                   Defaults to ``admin``
+    :type client: str
+    :returns: Value of variable on pool or None
+    :rtype: str or None
+    :raises: subprocess.CalledProcessError
+    """
+    try:
+        output = subprocess.check_output(
+            ['ceph', '--id', client, 'osd', 'pool', 'get', pool, param],
+            universal_newlines=True, stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError as cp:
+        if cp.returncode == 2 and 'ENOENT: option' in cp.output:
+            return None
+        raise
+    if ':' in output:
+        return output.split(':')[1].lstrip().rstrip()
+
+
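Usage sketch; pool and parameter names are illustrative:

    # 'ceph osd pool get rbd size' prints 'size: 3', which the helper
    # splits on ':' and strips, returning '3'. Unset options come back
    # as None via the ENOENT branch.
    size = get_pool_param('rbd', 'size')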
+def get_pool_erasure_profile(pool, client='admin'):
+    """Get erasure code profile for pool.
+
+    :param pool: Name of pool to get variable from
+    :type pool: str
+    :param client: (Optional) client id for ceph key to use
+                   Defaults to ``admin``
+    :type client: str
+    :returns: Erasure code profile of pool or None
+    :rtype: str or None
+    :raises: subprocess.CalledProcessError
+    """
+    try:
+        return get_pool_param(pool, 'erasure_code_profile', client=client)
+    except subprocess.CalledProcessError as cp:
+        if cp.returncode == 13 and 'EACCES: pool' in cp.output:
+            # Not an erasure coded pool
+            return None
+        raise
+
+
+def get_pool_quota(pool, client='admin'):
+    """Get pool quota.
+
+    :param pool: Name of pool to get variable from
+    :type pool: str
+    :param client: (Optional) client id for ceph key to use
+                   Defaults to ``admin``
+    :type client: str
+    :returns: Dictionary with quota variables
+    :rtype: dict
+    :raises: subprocess.CalledProcessError
+    """
+    output = subprocess.check_output(
+        ['ceph', '--id', client, 'osd', 'pool', 'get-quota', pool],
+        universal_newlines=True, stderr=subprocess.STDOUT)
+    rc = re.compile(r'\s+max\s+(\S+)\s*:\s+(\d+)')
+    result = {}
+    for line in output.splitlines():
+        m = rc.match(line)
+        if m:
+            result.update({'max_{}'.format(m.group(1)): m.group(2)})
+    return result
+
+
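The regex only matches quota lines with a numeric value, so unset quotas are simply absent from the result. A sketch of the mapping, with the output shape inferred from the regex:

    # 'ceph osd pool get-quota mypool' prints lines such as:
    #   quotas for pool 'mypool':
    #     max objects: 1000
    #     max bytes  : 1073741824
    # which get_pool_quota() folds into:
    #   {'max_objects': '1000', 'max_bytes': '1073741824'}
    quota = get_pool_quota('mypool')   # pool name illustrative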
+def get_pool_applications(pool='', client='admin'):
+    """Get pool applications.
+
+    :param pool: (Optional) Name of pool to get applications for
+                 Defaults to get for all pools
+    :type pool: str
+    :param client: (Optional) client id for ceph key to use
+                   Defaults to ``admin``
+    :type client: str
+    :returns: Dictionary with pool name as key
+    :rtype: dict
+    :raises: subprocess.CalledProcessError
+    """
+
+    cmd = ['ceph', '--id', client, 'osd', 'pool', 'application', 'get']
+    if pool:
+        cmd.append(pool)
+    try:
+        output = subprocess.check_output(cmd,
+                                         universal_newlines=True,
+                                         stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError as cp:
+        if cp.returncode == 2 and 'ENOENT' in cp.output:
+            return {}
+        raise
+    return json.loads(output)
+
+
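Usage sketch; the JSON shape follows 'ceph osd pool application get' and the values are illustrative:

    apps = get_pool_applications()          # all pools, keyed by pool name
    apps = get_pool_applications('glance')  # single pool, e.g. {'rbd': {}}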
+def list_pools_detail():
+    """Get detailed information about pools.
+
+    Structure:
+    {'pool_name_1': {'applications': {'application': {}},
+                     'parameters': {'pg_num': '42', 'size': '42'},
+                     'quota': {'max_bytes': '1000',
+                               'max_objects': '10'},
+                     },
+     'pool_name_2': ...
+     }
+
+    :returns: Dictionary with detailed pool information.
+    :rtype: dict
+    :raises: subprocess.CalledProcessError
+    """
+    get_params = ['pg_num', 'size']
+    result = {}
+    applications = get_pool_applications()
+    for pool in list_pools():
+        result[pool] = {
+            'applications': applications.get(pool, {}),
+            'parameters': {},
+            'quota': get_pool_quota(pool),
+        }
+        for param in get_params:
+            result[pool]['parameters'].update({
+                param: get_pool_param(pool, param)})
+        erasure_profile = get_pool_erasure_profile(pool)
+        if erasure_profile:
+            result[pool]['parameters'].update({
+                'erasure_code_profile': erasure_profile})
+    return result
+
+
 def dirs_need_ownership_update(service):
     """Determines if directories still need change of ownership.
 
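Taken together, the new helpers let a charm snapshot pool state in one call; a usage sketch:

    detail = list_pools_detail()
    for name, info in detail.items():
        print(name, info['parameters'].get('size'), info['quota'])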