diff --git a/ceph/utils.py b/ceph/utils.py
index 8e0e3c2..74579d5 100644
--- a/ceph/utils.py
+++ b/ceph/utils.py
@@ -90,6 +90,13 @@ PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph',
             'radosgw', 'xfsprogs', 'python-pyudev',
             'lvm2', 'parted']
 
+CEPH_KEY_MANAGER = 'ceph'
+VAULT_KEY_MANAGER = 'vault'
+KEY_MANAGERS = [
+    CEPH_KEY_MANAGER,
+    VAULT_KEY_MANAGER,
+]
+
 LinkSpeed = {
     "BASE_10": 10,
     "BASE_100": 100,
@@ -1463,17 +1470,42 @@ def get_devices(name):
 
 
 def osdize(dev, osd_format, osd_journal, reformat_osd=False,
-           ignore_errors=False, encrypt=False, bluestore=False):
+           ignore_errors=False, encrypt=False, bluestore=False,
+           key_manager=CEPH_KEY_MANAGER):
     if dev.startswith('/dev'):
         osdize_dev(dev, osd_format, osd_journal,
                    reformat_osd, ignore_errors, encrypt,
-                   bluestore)
+                   bluestore, key_manager)
     else:
         osdize_dir(dev, encrypt, bluestore)
 
 
 def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False,
-               ignore_errors=False, encrypt=False, bluestore=False):
+               ignore_errors=False, encrypt=False, bluestore=False,
+               key_manager=CEPH_KEY_MANAGER):
+    """
+    Prepare a block device for use as a Ceph OSD
+
+    A block device will only be prepared once during the lifetime
+    of the calling charm unit; future executions will be skipped.
+
+    :param: dev: Full path to block device to use
+    :param: osd_format: Format for OSD filesystem
+    :param: osd_journal: List of block devices to use for OSD journals
+    :param: reformat_osd: Reformat devices that are not currently in use
+                          which have been used previously
+    :param: ignore_errors: Don't fail in the event of any errors during
+                           processing
+    :param: encrypt: Encrypt block devices using 'key_manager'
+    :param: bluestore: Use bluestore native ceph block device format
+    :param: key_manager: Key management approach for encryption keys
+    :raises subprocess.CalledProcessError: in the event that any supporting
+                                           subprocess operation failed
+    :raises ValueError: if an invalid key_manager is provided
+    """
+    if key_manager not in KEY_MANAGERS:
+        raise ValueError('Unsupported key manager: {}'.format(key_manager))
+
     db = kv()
     osd_devices = db.get('osd-devices', [])
     if dev in osd_devices:
@@ -1510,7 +1542,8 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False,
             cmd = _ceph_volume(dev,
                                osd_journal,
                                encrypt,
-                               bluestore)
+                               bluestore,
+                               key_manager)
         else:
             cmd = _ceph_disk(dev,
                              osd_format,
@@ -1591,7 +1624,8 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False):
     return cmd
 
 
-def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False):
+def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False,
+                 key_manager=CEPH_KEY_MANAGER):
     """
     Prepare and activate a device for usage as a Ceph OSD using ceph-volume.
 
@@ -1602,6 +1636,7 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False):
     :param: osd_journal: List of block devices to use for OSD journals
     :param: encrypt: Use block device encryption
     :param: bluestore: Use bluestore storage for OSD
+    :param: key_manager: dm-crypt Key Manager to use
     :raises subprocess.CalledProcessError: in the event that any supporting
                                            LVM operation failed.
     :returns: list. 'ceph-volume' command and required parameters for
@@ -1620,7 +1655,7 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False):
         cmd.append('--filestore')
         main_device_type = 'data'
 
-    if encrypt:
+    if encrypt and key_manager == CEPH_KEY_MANAGER:
         cmd.append('--dmcrypt')
 
     # On-disk journal volume creation
@@ -1628,16 +1663,20 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False):
         journal_lv_type = 'journal'
         cmd.append('--journal')
         cmd.append(_allocate_logical_volume(
-            dev,
-            journal_lv_type,
-            osd_fsid,
-            size='{}M'.format(calculate_volume_size('journal')))
+            dev=dev,
+            lv_type=journal_lv_type,
+            osd_fsid=osd_fsid,
+            size='{}M'.format(calculate_volume_size('journal')),
+            encrypt=encrypt,
+            key_manager=key_manager)
         )
 
     cmd.append('--data')
-    cmd.append(_allocate_logical_volume(dev,
-                                        main_device_type,
-                                        osd_fsid))
+    cmd.append(_allocate_logical_volume(dev=dev,
+                                        lv_type=main_device_type,
+                                        osd_fsid=osd_fsid,
+                                        encrypt=encrypt,
+                                        key_manager=key_manager))
 
     if bluestore:
         for extra_volume in ('wal', 'db'):
@@ -1647,11 +1686,13 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False):
             least_used = find_least_used_utility_device(devices,
                                                         lvs=True)
             cmd.append(_allocate_logical_volume(
-                least_used,
-                extra_volume,
-                osd_fsid,
+                dev=least_used,
+                lv_type=extra_volume,
+                osd_fsid=osd_fsid,
                 size='{}M'.format(calculate_volume_size(extra_volume)),
-                shared=True)
+                shared=True,
+                encrypt=encrypt,
+                key_manager=key_manager)
             )
 
     elif osd_journal:
@@ -1659,11 +1700,13 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False):
         least_used = find_least_used_utility_device(osd_journal,
                                                     lvs=True)
         cmd.append(_allocate_logical_volume(
-            least_used,
-            'journal',
-            osd_fsid,
+            dev=least_used,
+            lv_type='journal',
+            osd_fsid=osd_fsid,
             size='{}M'.format(calculate_volume_size('journal')),
-            shared=True)
+            shared=True,
+            encrypt=encrypt,
+            key_manager=key_manager)
         )
 
     return cmd
@@ -1761,7 +1804,23 @@ def calculate_volume_size(lv_type):
     return int(configured_size) / _units[lv_type]
 
 
-def _initialize_disk(dev):
+def _luks_uuid(dev):
+    """
+    Check to see if dev is a LUKS encrypted volume, returning the UUID
+    of volume if it is.
+
+    :param: dev: path to block device to check.
+    :returns: str. UUID of LUKS device or None if not a LUKS device
+    """
+    try:
+        cmd = ['cryptsetup', 'luksUUID', dev]
+        return subprocess.check_output(cmd).decode('UTF-8').strip()
+    except subprocess.CalledProcessError:
+        return None
+
+
+def _initialize_disk(dev, dev_uuid, encrypt=False,
+                     key_manager=CEPH_KEY_MANAGER):
     """
     Initialize a raw block device with a single paritition
     consuming 100% of the avaliable disk space.
@@ -1769,10 +1828,25 @@ def _initialize_disk(dev):
     Function assumes that block device has already been wiped.
 
     :param: dev: path to block device to initialize
+    :param: dev_uuid: UUID to use for any dm-crypt operations
+    :param: encrypt: Encrypt OSD devices using dm-crypt
+    :param: key_manager: Key management approach for dm-crypt keys
    :raises: subprocess.CalledProcessError: if any parted calls fail
    :returns: str: Full path to new partition.
    """
    partition = _partition_name(dev)
+    use_vaultlocker = encrypt and key_manager == VAULT_KEY_MANAGER
+
+    if use_vaultlocker:
+        # NOTE(jamespage): Check to see if already initialized as a LUKS
+        #                  volume, which indicates this is a shared block
+        #                  device for journal, db or wal volumes.
+        luks_uuid = _luks_uuid(partition)
+        if luks_uuid:
+            return '/dev/mapper/crypt-{}'.format(luks_uuid)
+
+    dm_crypt = '/dev/mapper/crypt-{}'.format(dev_uuid)
+
     if not os.path.exists(partition):
         subprocess.check_call([
             'parted', '--script',
@@ -1786,11 +1860,25 @@ def _initialize_disk(dev):
             'mkpart',
             'primary', '1', '100%',
         ])
-    return partition
+
+    if use_vaultlocker and not os.path.exists(dm_crypt):
+        subprocess.check_call([
+            'vaultlocker',
+            'encrypt',
+            '--uuid', dev_uuid,
+            partition,
+        ])
+
+    if use_vaultlocker:
+        return dm_crypt
+    else:
+        return partition
 
 
 def _allocate_logical_volume(dev, lv_type, osd_fsid,
-                             size=None, shared=False):
+                             size=None, shared=False,
+                             encrypt=False,
+                             key_manager=CEPH_KEY_MANAGER):
     """
     Allocate a logical volume from a block device, ensuring any
     required initialization and setup of PV's and VG's to support
@@ -1803,13 +1891,19 @@ def _allocate_logical_volume(dev, lv_type, osd_fsid,
     :param: size: Size in LVM format for the device; if unset 100% of VG
     :param: shared: Shared volume group (journal, wal, db)
+    :param: encrypt: Encrypt OSD devices using dm-crypt
+    :param: key_manager: dm-crypt Key Manager to use
     :raises subprocess.CalledProcessError: in the event that any supporting
                                            LVM or parted operation fails.
     :returns: str: String in the format 'vg_name/lv_name'.
     """
     lv_name = "osd-{}-{}".format(lv_type, osd_fsid)
     current_volumes = lvm.list_logical_volumes()
-    pv_dev = _initialize_disk(dev)
+    if shared:
+        dev_uuid = str(uuid.uuid4())
+    else:
+        dev_uuid = osd_fsid
+    pv_dev = _initialize_disk(dev, dev_uuid, encrypt, key_manager)
 
     vg_name = None
     if not lvm.is_lvm_physical_volume(pv_dev):
diff --git a/tox.ini b/tox.ini
index a4a0b6d..f8388cf 100644
--- a/tox.ini
+++ b/tox.ini
@@ -7,7 +7,7 @@ skip_missing_interpreters = True
 setenv = VIRTUAL_ENV={envdir}
          PYTHONHASHSEED=0
 install_command =
-  pip install --allow-unverified python-apt {opts} {packages}
+  pip install {opts} {packages}
 commands = find . -type f -name "*.py[c|o]" -delete
            find . -type d -name "__pycache__" -delete
            rm -f .testrepository/times.dbm
diff --git a/unit_tests/test_utils.py b/unit_tests/test_utils.py
index 2d71613..7184b55 100644
--- a/unit_tests/test_utils.py
+++ b/unit_tests/test_utils.py
@@ -122,7 +122,7 @@ class CephTestCase(unittest.TestCase):
         _is_active_bluestore_device.return_value = False
         utils.osdize('/dev/sdb', osd_format='xfs', osd_journal=None,
                      reformat_osd=True, bluestore=False)
-        _ceph_volume.assert_called_with('/dev/sdb', None, False, False)
+        _ceph_volume.assert_called_with('/dev/sdb', None, False, False, 'ceph')
         _check_call.assert_called_with(['ceph-volume', 'prepare'])
         _zap_disk.assert_called_once()
         db.get.assert_called_with('osd-devices', [])
@@ -653,11 +653,15 @@ class CephVolumeSizeCalculatorTestCase(unittest.TestCase):
 
 class CephInitializeDiskTestCase(unittest.TestCase):
 
+    @patch.object(utils, '_luks_uuid')
     @patch.object(utils.subprocess, 'check_call')
     @patch.object(utils.os.path, 'exists')
-    def test_initialize_disk(self, _exists, _check_call):
+    def test_initialize_disk(self, _exists, _check_call,
+                             _luks_uuid):
         _exists.return_value = False
-        self.assertEqual(utils._initialize_disk('/dev/sdb'),
+        _luks_uuid.return_value = None
+        self.assertEqual(utils._initialize_disk('/dev/sdb',
+                                                'test-UUID'),
                          '/dev/sdb1')
         _check_call.assert_has_calls([
             call([
@@ -674,11 +678,61 @@ class CephInitializeDiskTestCase(unittest.TestCase):
             ]),
         ])
 
+    @patch.object(utils, '_luks_uuid')
     @patch.object(utils.subprocess, 'check_call')
     @patch.object(utils.os.path, 'exists')
-    def test_initialize_disk_exists(self, _exists, _check_call):
+    def test_initialize_disk_vaultlocker(self, _exists, _check_call,
+                                         _luks_uuid):
+        _exists.return_value = False
+        _luks_uuid.return_value = None
+        self.assertEqual(utils._initialize_disk('/dev/sdb',
+                                                'test-UUID',
+                                                True,
+                                                'vault'),
+                         '/dev/mapper/crypt-test-UUID')
+        _check_call.assert_has_calls([
+            call([
+                'parted', '--script',
+                '/dev/sdb',
+                'mklabel',
+                'gpt',
+            ]),
+            call([
+                'parted', '--script',
+                '/dev/sdb',
+                'mkpart',
+                'primary', '1', '100%',
+            ]),
+            call(['vaultlocker', 'encrypt',
+                  '--uuid', 'test-UUID',
+                  '/dev/sdb1']),
+        ])
+
+    @patch.object(utils, '_luks_uuid')
+    @patch.object(utils.subprocess, 'check_call')
+    @patch.object(utils.os.path, 'exists')
+    def test_initialize_disk_vaultlocker_encrypted(self,
+                                                   _exists,
+                                                   _check_call,
+                                                   _luks_uuid):
         _exists.return_value = True
-        self.assertEqual(utils._initialize_disk('/dev/sdb'),
+        _luks_uuid.return_value = 'existing-UUID'
+        self.assertEqual(utils._initialize_disk('/dev/sdb',
+                                                'test-UUID',
+                                                True,
+                                                'vault'),
+                         '/dev/mapper/crypt-existing-UUID')
+        _check_call.assert_not_called()
+
+    @patch.object(utils, '_luks_uuid')
+    @patch.object(utils.subprocess, 'check_call')
+    @patch.object(utils.os.path, 'exists')
+    def test_initialize_disk_exists(self, _exists, _check_call,
+                                    _luks_uuid):
+        _exists.return_value = True
+        _luks_uuid.return_value = None
+        self.assertEqual(utils._initialize_disk('/dev/sdb',
+                                                'test-UUID'),
                          '/dev/sdb1')
         _check_call.assert_not_called()
 
@@ -759,7 +813,8 @@ class CephAllocateVolumeTestCase(unittest.TestCase):
     @patch.object(utils, 'lvm')
     def _test_allocate_logical_volume(self, _lvm, _uuid4, _initialize_disk,
                                       dev, lv_type, osd_fsid,
-                                      size=None, shared=False):
+                                      size=None, shared=False, encrypt=False,
+                                      key_manager='ceph'):
         test_uuid = '1234-1234-1234-1234'
         pv_dev = utils._partition_name(dev)
 
@@ -794,7 +849,12 @@ class CephAllocateVolumeTestCase(unittest.TestCase):
         else:
             _lvm.create_logical_volume.assert_not_called()
 
-        _initialize_disk.assert_called_with(dev)
+        _initialize_disk.assert_called_with(
+            dev,
+            osd_fsid if not shared else test_uuid,
+            encrypt,
+            key_manager
+        )
 
     def test_allocate_lv_already_pv(self):
         self._test_allocate_logical_volume(dev='/dev/sdb', lv_type='data',
@@ -945,9 +1005,8 @@ class CephVolumeTestCase(unittest.TestCase):
         _calculate_volume_size.return_value = 1024
         _uuid4.return_value = self._osd_uuid
         _allocate_logical_volume.side_effect = (
-            lambda dev, lv_type, osd_fsid, size=None, shared=False: (
-                'ceph-{fsid}/osd-{type}-{fsid}'.format(fsid=osd_fsid,
-                                                       type=lv_type)
+            lambda *args, **kwargs: (
+                'ceph-{osd_fsid}/osd-{lv_type}-{osd_fsid}'.format(**kwargs)
             )
         )
         self.assertEqual(
@@ -969,8 +1028,12 @@ class CephVolumeTestCase(unittest.TestCase):
              'osd-data-{fsid}').format(fsid=self._osd_uuid)]
         )
         _allocate_logical_volume.assert_has_calls([
-            call('/dev/sdb', 'journal', self._osd_uuid, size='1024M'),
-            call('/dev/sdb', 'data', self._osd_uuid),
+            call(dev='/dev/sdb', lv_type='journal',
+                 osd_fsid=self._osd_uuid, size='1024M',
+                 encrypt=False, key_manager='ceph'),
+            call(dev='/dev/sdb', lv_type='data',
+                 osd_fsid=self._osd_uuid,
+                 encrypt=False, key_manager='ceph'),
         ])
         _find_least_used_utility_device.assert_not_called()
         _calculate_volume_size.assert_called_with('journal')
@@ -989,9 +1052,8 @@ class CephVolumeTestCase(unittest.TestCase):
         _calculate_volume_size.return_value = 1024
         _uuid4.return_value = self._osd_uuid
         _allocate_logical_volume.side_effect = (
-            lambda dev, lv_type, osd_fsid, size=None, shared=False: (
-                'ceph-{fsid}/osd-{type}-{fsid}'.format(fsid=osd_fsid,
-                                                       type=lv_type)
+            lambda *args, **kwargs: (
+                'ceph-{osd_fsid}/osd-{lv_type}-{osd_fsid}'.format(**kwargs)
             )
         )
         self.assertEqual(
@@ -1013,9 +1075,13 @@ class CephVolumeTestCase(unittest.TestCase):
              'osd-journal-{fsid}').format(fsid=self._osd_uuid)]
         )
         _allocate_logical_volume.assert_has_calls([
-            call('/dev/sdb', 'data', self._osd_uuid),
-            call('/dev/sdc', 'journal', self._osd_uuid,
-                 shared=True, size='1024M'),
+            call(dev='/dev/sdb', lv_type='data',
+                 osd_fsid=self._osd_uuid,
+                 encrypt=False, key_manager='ceph'),
+            call(dev='/dev/sdc', lv_type='journal',
+                 osd_fsid=self._osd_uuid,
+                 shared=True, size='1024M',
+                 encrypt=False, key_manager='ceph'),
         ])
         _find_least_used_utility_device.assert_has_calls([
             call(['/dev/sdc'], lvs=True),
@@ -1037,9 +1103,8 @@ class CephVolumeTestCase(unittest.TestCase):
         _calculate_volume_size.return_value = 1024
         _uuid4.return_value = self._osd_uuid
         _allocate_logical_volume.side_effect = (
-            lambda dev, lv_type, osd_fsid, size=None, shared=False: (
-                'ceph-{fsid}/osd-{type}-{fsid}'.format(fsid=osd_fsid,
-                                                       type=lv_type)
+            lambda *args, **kwargs: (
+                'ceph-{osd_fsid}/osd-{lv_type}-{osd_fsid}'.format(**kwargs)
             )
         )
         self.assertEqual(
@@ -1058,7 +1123,9 @@ class CephVolumeTestCase(unittest.TestCase):
              'osd-block-{fsid}').format(fsid=self._osd_uuid)]
         )
         _allocate_logical_volume.assert_has_calls([
-            call('/dev/sdb', 'block', self._osd_uuid),
+            call(dev='/dev/sdb', lv_type='block',
+                 osd_fsid=self._osd_uuid,
+                 encrypt=False, key_manager='ceph'),
         ])
         _find_least_used_utility_device.assert_not_called()
         _calculate_volume_size.assert_not_called()
@@ -1082,9 +1149,8 @@ class CephVolumeTestCase(unittest.TestCase):
         _calculate_volume_size.return_value = 1024
         _uuid4.return_value = self._osd_uuid
         _allocate_logical_volume.side_effect = (
-            lambda dev, lv_type, osd_fsid, size=None, shared=False: (
-                'ceph-{fsid}/osd-{type}-{fsid}'.format(fsid=osd_fsid,
-                                                       type=lv_type)
+            lambda *args, **kwargs: (
+                'ceph-{osd_fsid}/osd-{lv_type}-{osd_fsid}'.format(**kwargs)
             )
         )
         self.assertEqual(
@@ -1109,11 +1175,17 @@ class CephVolumeTestCase(unittest.TestCase):
              'osd-db-{fsid}').format(fsid=self._osd_uuid)]
         )
         _allocate_logical_volume.assert_has_calls([
-            call('/dev/sdb', 'block', self._osd_uuid),
-            call('/dev/sdd', 'wal', self._osd_uuid,
-                 shared=True, size='1024M'),
-            call('/dev/sdc', 'db', self._osd_uuid,
-                 shared=True, size='1024M'),
+            call(dev='/dev/sdb', lv_type='block',
+                 osd_fsid=self._osd_uuid,
+                 encrypt=False, key_manager='ceph'),
+            call(dev='/dev/sdd', lv_type='wal',
+                 osd_fsid=self._osd_uuid,
+                 shared=True, size='1024M',
+                 encrypt=False, key_manager='ceph'),
+            call(dev='/dev/sdc', lv_type='db',
+                 osd_fsid=self._osd_uuid,
+                 shared=True, size='1024M',
+                 encrypt=False, key_manager='ceph'),
         ])
         _find_least_used_utility_device.assert_has_calls([
             call(['/dev/sdd'], lvs=True),
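
For reference, a minimal charm-side sketch of how the new key_manager argument is intended to flow into osdize(); the import path and the prepare_osd_devices() helper are illustrative assumptions, not part of this change:

    from ceph import utils

    def prepare_osd_devices(devices, use_vault=False):
        # 'vault' selects vaultlocker-managed dm-crypt keys; 'ceph' keeps the
        # pre-existing ceph-volume --dmcrypt behaviour (the default).
        key_manager = (utils.VAULT_KEY_MANAGER if use_vault
                       else utils.CEPH_KEY_MANAGER)
        for dev in devices:
            utils.osdize(dev,
                         osd_format='xfs',
                         osd_journal=None,
                         encrypt=True,
                         bluestore=True,
                         key_manager=key_manager)

    # Anything outside KEY_MANAGERS is rejected by osdize_dev() before any
    # device is touched:
    #   utils.osdize('/dev/sdb', 'xfs', None, key_manager='barbican')
    #   -> ValueError: Unsupported key manager: barbican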
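
The vaultlocker branch of _initialize_disk() can be summarised with the standalone sketch below; the cryptsetup and vaultlocker invocations mirror the diff, the parted partitioning step is omitted for brevity, and the helper name plus the note about Vault-backed key storage are assumptions about vaultlocker's behaviour:

    import os
    import subprocess

    def encrypted_device_for(partition, dev_uuid):
        # A shared journal/wal/db partition may already be LUKS formatted;
        # reuse the existing crypt-<uuid> mapping rather than re-encrypting.
        try:
            existing = subprocess.check_output(
                ['cryptsetup', 'luksUUID', partition]).decode('UTF-8').strip()
            return '/dev/mapper/crypt-{}'.format(existing)
        except subprocess.CalledProcessError:
            pass

        dm_crypt = '/dev/mapper/crypt-{}'.format(dev_uuid)
        if not os.path.exists(dm_crypt):
            # vaultlocker generates a LUKS key, stores it in Vault and opens
            # the device under the crypt-<uuid> device-mapper name.
            subprocess.check_call(
                ['vaultlocker', 'encrypt', '--uuid', dev_uuid, partition])
        return dm_crypt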