From 59f113d912d9c594c1f76824694f06626f77b8a1 Mon Sep 17 00:00:00 2001
From: Xav Paice
Date: Wed, 26 Apr 2017 18:59:46 +1200
Subject: [PATCH] Add ceph methods for reweight

Adds the following methods:
- get_ceph_pg_stat
- get_ceph_health
- reweight_osd

Change-Id: I23a1563875cfa53cf4341f17ce14c5322cc955ca
---
 ceph/__init__.py        |  72 ++++++++++++++++++++++++
 unit_tests/test_ceph.py | 122 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 194 insertions(+)

diff --git a/ceph/__init__.py b/ceph/__init__.py
index 7dab66c..f29adfc 100644
--- a/ceph/__init__.py
+++ b/ceph/__init__.py
@@ -2033,3 +2033,75 @@ def resolve_ceph_version(source):
     '''
     os_release = get_os_codename_install_source(source)
     return UCA_CODENAME_MAP.get(os_release)
+
+
+def get_ceph_pg_stat():
+    """
+    Returns the result of ceph pg stat
+    :return: dict
+    """
+    try:
+        tree = check_output(['ceph', 'pg', 'stat', '--format=json'])
+        try:
+            json_tree = json.loads(tree)
+            if not json_tree['num_pg_by_state']:
+                return None
+            return json_tree
+        except ValueError as v:
+            log("Unable to parse ceph pg stat json: {}. Error: {}".format(
+                tree, v))
+            raise
+    except subprocess.CalledProcessError as e:
+        log("ceph pg stat command failed with message: {}".format(
+            e))
+        raise
+
+
+def get_ceph_health():
+    """
+    Returns the health of the cluster from a 'ceph health'
+    :return: dict
+        Also raises CalledProcessError if our ceph command fails
+        To get the overall status, use get_ceph_health()['overall_status']
+    """
+    try:
+        tree = check_output(
+            ['ceph', 'health', '--format=json'])
+        try:
+            json_tree = json.loads(tree)
+            # Make sure children are present in the json
+            if not json_tree['overall_status']:
+                return None
+            return json_tree
+        except ValueError as v:
+            log("Unable to parse ceph health json: {}. Error: {}".format(
+                tree, v))
+            raise
+    except subprocess.CalledProcessError as e:
+        log("ceph health command failed with message: {}".format(
+            e))
+        raise
+
+
+def reweight_osd(osd_num, new_weight):
+    """
+    Changes the crush weight of an OSD to the value specified.
+    :param osd_num: the osd id which should be changed
+    :param new_weight: the new weight for the OSD
+    :returns: bool. True if output looks right, else false.
+    :raises CalledProcessError: if an error occurs invoking the ceph command
+    """
+    try:
+        cmd_result = subprocess.check_output(
+            ['ceph', 'osd', 'crush', 'reweight', "osd.{}".format(osd_num),
+             new_weight], stderr=subprocess.STDOUT)
+        expected_result = "reweighted item id {ID} name \'osd.{ID}\'".format(
+                              ID=osd_num) + " to {}".format(new_weight)
+        log(cmd_result)
+        if expected_result in cmd_result:
+            return True
+        return False
+    except subprocess.CalledProcessError as e:
+        log("ceph osd crush reweight command failed with message: {}".format(
+            e))
+        raise
diff --git a/unit_tests/test_ceph.py b/unit_tests/test_ceph.py
index 0fa9f81..69d5d90 100644
--- a/unit_tests/test_ceph.py
+++ b/unit_tests/test_ceph.py
@@ -16,6 +16,7 @@ import mock
 import unittest
 import ceph
 from subprocess import CalledProcessError
+import subprocess
 
 
 class TestDevice():
@@ -200,6 +201,127 @@ class CephTestCase(unittest.TestCase):
         partition_list = ceph.get_partition_list('/dev/xvdb')
         self.assertEqual(len(partition_list), 2)
 
+    @mock.patch.object(ceph, 'check_output')
+    def test_get_ceph_pg_stat(self, output):
+        """It returns the current PG stat"""
+        output.return_value = """{
+  "num_pg_by_state": [
+    {
+      "name": "active+clean",
+      "num": 320
+    }
+  ],
+  "version": 7111,
+  "num_pgs": 320,
+  "num_bytes": 118111608230,
+  "raw_bytes_used": 355042729984,
+  "raw_bytes_avail": 26627104956416,
+  "raw_bytes": 26982147686400
+}"""
+        pg_stat = ceph.get_ceph_pg_stat()
+        self.assertEqual(pg_stat['num_pgs'], 320)
+
+    @mock.patch.object(ceph, 'check_output')
+    def test_get_ceph_health(self, output):
+        """It gives the current Ceph health"""
+        output.return_value = """{
+  "health": {
+    "health_services": [
+      {
+        "mons": [
+          {
+            "name": "node1",
+            "kb_total": 2883598592,
+            "kb_used": 61728860,
+            "kb_avail": 2675368308,
+            "avail_percent": 92,
+            "last_updated": "2017-04-25 22:17:36.966046",
+            "store_stats": {
+              "bytes_total": 18612017,
+              "bytes_sst": 0,
+              "bytes_log": 2172670,
+              "bytes_misc": 16439347,
+              "last_updated": "0.000000"
+            },
+            "health": "HEALTH_OK"
+          },
+          {
+            "name": "node2",
+            "kb_total": 2883598592,
+            "kb_used": 79776472,
+            "kb_avail": 2657320696,
+            "avail_percent": 92,
+            "last_updated": "2017-04-25 22:18:27.915641",
+            "store_stats": {
+              "bytes_total": 18517923,
+              "bytes_sst": 0,
+              "bytes_log": 3340129,
+              "bytes_misc": 15177794,
+              "last_updated": "0.000000"
+            },
+            "health": "HEALTH_OK"
+          },
+          {
+            "name": "node3",
+            "kb_total": 2883598592,
+            "kb_used": 77399744,
+            "kb_avail": 2659697424,
+            "avail_percent": 92,
+            "last_updated": "2017-04-25 22:18:27.934053",
+            "store_stats": {
+              "bytes_total": 18517892,
+              "bytes_sst": 0,
+              "bytes_log": 3340129,
+              "bytes_misc": 15177763,
+              "last_updated": "0.000000"
+            },
+            "health": "HEALTH_OK"
+          }
+        ]
+      }
+    ]
+  },
+  "timechecks": {
+    "epoch": 8,
+    "round": 3022,
+    "round_status": "finished",
+    "mons": [
+      {
+        "name": "node1",
+        "skew": 0,
+        "latency": 0,
+        "health": "HEALTH_OK"
+      },
+      {
+        "name": "node2",
+        "skew": 0,
+        "latency": 0.000765,
+        "health": "HEALTH_OK"
+      },
+      {
+        "name": "node3",
+        "skew": 0,
+        "latency": 0.000765,
+        "health": "HEALTH_OK"
+      }
+    ]
+  },
+  "summary": [],
+  "overall_status": "HEALTH_OK",
+  "detail": []
+}"""
+        health = ceph.get_ceph_health()
+        self.assertEqual(health['overall_status'], "HEALTH_OK")
+
+    @mock.patch.object(subprocess, 'check_output')
+    def test_reweight_osd(self, mock_reweight):
+        """It changes the weight of an OSD"""
+        mock_reweight.return_value = "reweighted item id 0 name 'osd.0' to 1"
+        reweight_result = ceph.reweight_osd('0', '1')
+        self.assertEqual(reweight_result, True)
+        mock_reweight.assert_called_once_with(
+            ['ceph', 'osd', 'crush', 'reweight', 'osd.0', '1'], stderr=-2)
+
 
 class CephVersionTestCase(unittest.TestCase):
     @mock.patch.object(ceph, 'get_os_codename_install_source')