Allow devices to be added post-install

Track devices that have been added to the ring and allow
devices to be added to the ring post-install (previously this
was only possible from the install hook). Devices added to the
ring before this change are migrated into the device tracking
store to avoid conflicts.

Change-Id: Id268dc6369041a4d2db6f30a997dfa0c0d73b93f
Closes-Bug: 1383390
Edward Hope-Morley 2016-03-02 12:39:50 +00:00
parent 2ceda02a08
commit e7bf2f910e
4 changed files with 384 additions and 33 deletions
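For context on the tracking store this commit introduces: device records live in a charmhelpers unitdata database at KV_DB_PATH, stored as one JSON blob under the 'devices' key and keyed "<device>@<JUJU_ENV_UUID>". The snippet below is an illustrative sketch only, not part of the commit; all paths, keys and the record shape are taken from the diff that follows.

# Illustrative sketch, not part of this commit: read back the device
# tracking store written by remember_devices() in the diff below.
import json

from charmhelpers.core.unitdata import Storage

KV_DB_PATH = '/var/lib/juju/swift_storage/charm_kvdata.db'

kvstore = Storage(KV_DB_PATH)
# remember_devices() stores a JSON-encoded dict under the 'devices' key,
# e.g. {"vdb@<env-uuid>": {"blkid": "808bc298-...", "status": "active"}}
devstore = json.loads(kvstore.get(key='devices') or '{}')
kvstore.close()

for key, entry in devstore.items():
    print("%s blkid=%s status=%s"
          % (key, entry.get('blkid'), entry.get('status')))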


@@ -16,8 +16,10 @@ from lib.swift_storage_utils import (
setup_storage,
assert_charm_supports_ipv6,
setup_rsync,
remember_devices,
REQUIRED_INTERFACES,
assess_status,
ensure_devs_tracked,
)
from lib.misc_utils import pause_aware_restart_on_change
@@ -27,6 +29,7 @@ from charmhelpers.core.hookenv import (
config,
log,
relation_get,
relation_ids,
relation_set,
relations_of_type,
status_set,
@@ -85,6 +88,12 @@ def config_changed():
openstack_upgrade_available('swift'):
status_set('maintenance', 'Running openstack upgrade')
do_openstack_upgrade(configs=CONFIGS)
setup_storage()
for rid in relation_ids('swift-storage'):
swift_storage_relation_joined(rid=rid)
CONFIGS.write_all()
save_script_rc()
@@ -96,23 +105,28 @@ def config_changed():
def upgrade_charm():
apt_install(filter_installed_packages(PACKAGES), fatal=True)
update_nrpe_config()
ensure_devs_tracked()
@hooks.hook()
def swift_storage_relation_joined():
devs = [os.path.basename(dev) for dev in determine_block_devices()]
def swift_storage_relation_joined(rid=None):
rel_settings = {
'zone': config('zone'),
'object_port': config('object-server-port'),
'container_port': config('container-server-port'),
'account_port': config('account-server-port'),
'device': ':'.join(devs),
}
devs = determine_block_devices() or []
devs = [os.path.basename(d) for d in devs]
rel_settings['device'] = ':'.join(devs)
# Keep a reference of devices we are adding to the ring
remember_devices(devs)
if config('prefer-ipv6'):
rel_settings['private-address'] = get_ipv6_addr()[0]
relation_set(**rel_settings)
relation_set(relation_id=rid, **rel_settings)
@hooks.hook('swift-storage-relation-changed')
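As a quick illustration (not part of the diff), the settings published by swift_storage_relation_joined() end up looking like the dict below, assuming the default zone and ports asserted in the unit tests further down; the colon-separated 'device' value is what ensure_devs_tracked() later splits when migrating pre-existing devices into the local store.

# Illustrative only: example payload passed to relation_set() by
# swift_storage_relation_joined() with three block devices attached.
rel_settings = {
    'zone': 1,
    'object_port': 6000,
    'container_port': 6001,
    'account_port': 6002,
    'device': 'vdb:vdc:vdd',
}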


@@ -1,5 +1,7 @@
import json
import os
import re
import subprocess
import shutil
import tempfile
@@ -24,6 +26,10 @@ from charmhelpers.fetch import (
apt_update
)
from charmhelpers.core.unitdata import (
Storage as KVStore,
)
from charmhelpers.core.host import (
mkdir,
mount,
@@ -37,8 +43,12 @@ from charmhelpers.core.hookenv import (
log,
DEBUG,
INFO,
WARNING,
ERROR,
unit_private_ip,
local_unit,
relation_get,
relation_ids,
)
from charmhelpers.contrib.storage.linux.utils import (
@@ -106,6 +116,16 @@ RESTART_MAP = {
SWIFT_CONF_DIR = '/etc/swift'
SWIFT_RING_EXT = 'ring.gz'
# NOTE(hopem): we intentionally place this database outside of unit context so
# that if the unit, service or even entire environment is
# destroyed, there will still be a record of what devices were in
# use so that when the swift charm next executes, already used
# devices will not be unintentionally reformatted. If devices are
# to be recycled, they will need to be manually removed from this
# database.
# FIXME: add charm support for removing devices (see LP: #1448190)
KV_DB_PATH = '/var/lib/juju/swift_storage/charm_kvdata.db'
def ensure_swift_directories():
'''
@@ -226,9 +246,8 @@ def guess_block_devices():
def determine_block_devices():
block_device = config('block-device')
if not block_device or block_device in ['None', 'none']:
log('No storage devices specified in config as block-device',
if not block_device or block_device.lower() == 'none':
log("No storage devices specified in 'block_device' config",
level=ERROR)
return None
@@ -238,31 +257,192 @@ def determine_block_devices():
bdevs = block_device.split(' ')
# attempt to ensure block devices, but filter out missing devs
_none = ['None', 'none', None]
_none = ['None', 'none']
valid_bdevs = \
[x for x in map(ensure_block_device, bdevs) if x not in _none]
[x for x in map(ensure_block_device, bdevs) if str(x).lower() not in
_none]
log('Valid ensured block devices: %s' % valid_bdevs)
return valid_bdevs
def mkfs_xfs(bdev):
cmd = ['mkfs.xfs', '-f', '-i', 'size=1024', bdev]
def mkfs_xfs(bdev, force=False):
"""Format device with XFS filesystem.
By default this should fail if the device already has a filesystem on it.
"""
cmd = ['mkfs.xfs']
if force:
cmd.append("-f")
cmd += ['-i', 'size=1024', bdev]
check_call(cmd)
def devstore_safe_load(devstore):
"""Attempt to decode json data and return None if an error occurs while
also printing a log.
"""
if not devstore:
return None
try:
return json.loads(devstore)
except ValueError:
log("Unable to decode JSON devstore", level=DEBUG)
return None
def is_device_in_ring(dev, skip_rel_check=False, ignore_deactivated=True):
"""Check if device has been added to the ring.
First check local KV store then check storage rel with proxy.
"""
d = os.path.dirname(KV_DB_PATH)
if not os.path.isdir(d):
mkdir(d)
log("Device '%s' does not appear to be in use by Swift" % (dev),
level=INFO)
return False
# First check local KV store
kvstore = KVStore(KV_DB_PATH)
devstore = devstore_safe_load(kvstore.get(key='devices'))
kvstore.close()
deactivated = []
if devstore:
blk_uuid = get_device_blkid("/dev/%s" % (dev))
env_uuid = os.environ['JUJU_ENV_UUID']
masterkey = "%s@%s" % (dev, env_uuid)
if (masterkey in devstore and
devstore[masterkey].get('blkid') == blk_uuid and
devstore[masterkey].get('status') == 'active'):
log("Device '%s' appears to be in use by Swift (found in local "
"devstore)" % (dev), level=INFO)
return True
for key, val in devstore.iteritems():
if key != masterkey and val.get('blkid') == blk_uuid:
log("Device '%s' appears to be in use by Swift (found in "
"local devstore) but has a different JUJU_ENV_UUID "
"(current=%s, expected=%s). "
"This could indicate that the device was added as part of "
"a previous deployment and will require manual removal or "
"updating if it needs to be reformatted."
% (dev, key, masterkey), level=INFO)
return True
if ignore_deactivated:
deactivated = [k.split('@')[0] for k, v in devstore.iteritems()
if k == masterkey and v.get('blkid') == blk_uuid and
v.get('status') != 'active']
if skip_rel_check:
log("Device '%s' does not appear to be in use by swift (searched "
"local devstore only)" % (dev), level=INFO)
return False
# Then check swift-storage relation with proxy
for rid in relation_ids('swift-storage'):
devstore = relation_get(attribute='device', rid=rid, unit=local_unit())
if devstore and dev in devstore.split(':'):
if not ignore_deactivated or dev not in deactivated:
log("Device '%s' appears to be in use by swift (found on "
"proxy relation) but was not found in local devstore so "
"will be added to the cache" % (dev), level=INFO)
remember_devices([dev])
return True
log("Device '%s' does not appear to be in use by swift (searched local "
"devstore and proxy relation)" % (dev), level=INFO)
return False
def get_device_blkid(dev):
blk_uuid = subprocess.check_output(['blkid', '-s', 'UUID', dev])
blk_uuid = re.match(r'^%s:\s+UUID="(.+)"$' % (dev), blk_uuid.strip())
if blk_uuid:
return blk_uuid.group(1)
else:
log("Failed to obtain device UUID for device '%s' - returning None" %
dev, level=WARNING)
return None
def remember_devices(devs):
"""Add device to local store of ringed devices."""
d = os.path.dirname(KV_DB_PATH)
if not os.path.isdir(d):
mkdir(d)
kvstore = KVStore(KV_DB_PATH)
devstore = devstore_safe_load(kvstore.get(key='devices')) or {}
env_uuid = os.environ['JUJU_ENV_UUID']
for dev in devs:
blk_uuid = get_device_blkid("/dev/%s" % (dev))
key = "%s@%s" % (dev, env_uuid)
if key in devstore and devstore[key].get('blkid') == blk_uuid:
log("Device '%s' already in devstore (status:%s)" %
(dev, devstore[key].get('status')), level=DEBUG)
else:
existing = [(k, v) for k, v in devstore.iteritems()
if v.get('blkid') == blk_uuid and
re.match("^(.+)@(.+)$", k).group(1) == dev]
if existing:
log("Device '%s' already in devstore but has a different "
"JUJU_ENV_UUID (%s)" %
(dev, re.match(".+@(.+)$", existing[0][0]).group(1)),
level=WARNING)
else:
log("Adding device '%s' with blkid='%s' to devstore" %
(blk_uuid, dev),
level=DEBUG)
devstore[key] = {'blkid': blk_uuid, 'status': 'active'}
if devstore:
kvstore.set(key='devices', value=json.dumps(devstore))
kvstore.flush()
kvstore.close()
def ensure_devs_tracked():
for rid in relation_ids('swift-storage'):
devs = relation_get(attribute='device', rid=rid, unit=local_unit())
if devs:
for dev in devs.split(':'):
# this will migrate if not already in the local store
is_device_in_ring(dev, skip_rel_check=True)
def setup_storage():
# Ensure /srv/node exists just in case no disks
# are detected and used.
mkdir(os.path.join('/srv', 'node'),
owner='swift', group='swift',
perms=0o755)
reformat = str(config('overwrite')).lower() == "true"
for dev in determine_block_devices():
if config('overwrite') in ['True', 'true']:
if is_device_in_ring(os.path.basename(dev)):
log("Device '%s' already in the ring - ignoring" % (dev))
continue
if reformat:
clean_storage(dev)
# if not cleaned and in use, mkfs should fail.
mkfs_xfs(dev)
_dev = os.path.basename(dev)
_mp = os.path.join('/srv', 'node', _dev)
try:
# If not cleaned and in use, mkfs should fail.
mkfs_xfs(dev, force=reformat)
except subprocess.CalledProcessError as exc:
# This is expected if a formatted device is provided and we are
# not forcing the format.
log("Format device '%s' failed (%s) - continuing to next device" %
(dev, exc), level=WARNING)
continue
basename = os.path.basename(dev)
_mp = os.path.join('/srv', 'node', basename)
mkdir(_mp, owner='swift', group='swift')
options = None
@@ -272,7 +452,7 @@ def setup_storage():
dev = loopback_device
options = "loop, defaults"
mountpoint = '/srv/node/%s' % _dev
mountpoint = '/srv/node/%s' % basename
filesystem = "xfs"
mount(dev, mountpoint, filesystem=filesystem)
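One behavioural detail worth calling out: mkfs.xfs without '-f' refuses to format a device that already carries a filesystem, and setup_storage() now relies on that failure to skip devices when 'overwrite' is false rather than reformatting them. The sketch below is illustrative only (the helper name try_format is hypothetical); it mirrors mkfs_xfs() above but wraps the expected failure.

# Illustrative sketch of the force semantics setup_storage() depends on.
import subprocess

def try_format(dev, reformat=False):
    cmd = ['mkfs.xfs', '-i', 'size=1024', dev]
    if reformat:
        # '-f' overwrites an existing filesystem, as when overwrite=true.
        cmd.insert(1, '-f')
    try:
        subprocess.check_call(cmd)
        return True
    except subprocess.CalledProcessError:
        # Expected when the device is already formatted and we are not
        # forcing a reformat - the caller skips this device.
        return False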


@@ -1,4 +1,7 @@
from mock import patch
import os
import json
import uuid
from test_utils import CharmTestCase, patch_open
@@ -15,6 +18,7 @@ TO_PATCH = [
'config',
'log',
'relation_set',
'relation_ids',
'relation_get',
'relations_of_type',
# charmhelpers.core.host
@@ -111,42 +115,170 @@ class SwiftStorageRelationsTests(CharmTestCase):
self.assertTrue(self.CONFIGS.write_all.called)
self.assertTrue(self.setup_rsync.called)
def test_upgrade_charm(self):
@patch.object(hooks, 'ensure_devs_tracked')
def test_upgrade_charm(self, mock_ensure_devs_tracked):
self.filter_installed_packages.return_value = [
'python-psutil']
hooks.upgrade_charm()
self.apt_install.assert_called_with([
'python-psutil'], fatal=True)
self.assertTrue(self.update_nrpe_config.called)
self.assertTrue(mock_ensure_devs_tracked.called)
@patch('hooks.lib.swift_storage_utils.get_device_blkid',
lambda dev: str(uuid.uuid4()))
@patch.object(hooks.os, 'environ')
@patch('hooks.lib.swift_storage_utils.os.path.isdir', lambda *args: True)
@patch.object(hooks, 'relation_set')
@patch('hooks.lib.swift_storage_utils.local_unit')
@patch('hooks.lib.swift_storage_utils.relation_ids', lambda *args: [])
@patch('hooks.lib.swift_storage_utils.KVStore')
@patch.object(uuid, 'uuid4', lambda: 'a-test-uuid')
def test_storage_joined_single_device(self, mock_kvstore, mock_local_unit,
mock_rel_set, mock_environ):
mock_environ.get.side_effect = {'JUJU_ENV_UUID': uuid.uuid4()}
mock_local_unit.return_value = 'test/0'
kvstore = mock_kvstore.return_value
kvstore.__enter__.return_value = kvstore
kvstore.get.return_value = None
self.determine_block_devices.return_value = ['/dev/vdb']
def test_storage_joined_single_device(self):
self.determine_block_devices.return_value = [
'/dev/vdb']
hooks.swift_storage_relation_joined()
self.relation_set.assert_called_with(
mock_rel_set.assert_called_with(
relation_id=None,
device='vdb', object_port=6000, account_port=6002,
zone=1, container_port=6001
)
def test_storage_joined_ipv6(self):
kvstore.get.return_value = None
rel_settings = {}
def fake_kv_set(key, value):
rel_settings[key] = value
kvstore.set.side_effect = fake_kv_set
def fake_kv_get(key):
return rel_settings.get(key)
kvstore.get.side_effect = fake_kv_get
devices = {"vdb@%s" % (mock_environ['JUJU_ENV_UUID']):
{"status": "active",
"blkid": 'a-test-uuid'}}
kvstore.set.assert_called_with(key='devices',
value=json.dumps(devices))
@patch('hooks.lib.swift_storage_utils.get_device_blkid',
lambda dev: '%s-blkid-uuid' % os.path.basename(dev))
@patch.object(hooks.os, 'environ')
@patch('hooks.lib.swift_storage_utils.os.path.isdir', lambda *args: True)
@patch.object(hooks, 'relation_set')
@patch('hooks.lib.swift_storage_utils.relation_ids', lambda *args: [])
@patch('hooks.lib.swift_storage_utils.KVStore')
@patch.object(uuid, 'uuid4', lambda: 'a-test-uuid')
def test_storage_joined_ipv6(self, mock_kvstore, mock_rel_set,
mock_environ):
kvstore = mock_kvstore.return_value
kvstore.__enter__.return_value = kvstore
kvstore.get.return_value = None
self.determine_block_devices.return_value = ['/dev/vdb']
self.test_config.set('prefer-ipv6', True)
self.get_ipv6_addr.return_value = ['2001:db8:1::1']
hooks.swift_storage_relation_joined()
args = {
'relation_id': None,
'device': 'vdb', 'object_port': 6000,
'account_port': 6002, 'zone': 1, 'container_port': 6001,
'private-address': '2001:db8:1::1',
}
self.relation_set.assert_called_with(**args)
mock_rel_set.assert_called_with(**args)
kvstore.get.assert_called_with(key='devices')
def test_storage_joined_multi_device(self):
@patch('hooks.lib.swift_storage_utils.get_device_blkid',
lambda dev: '%s-blkid-uuid' % os.path.basename(dev))
@patch.object(hooks.os, 'environ')
@patch('hooks.lib.swift_storage_utils.os.path.isdir', lambda *args: True)
@patch('hooks.lib.swift_storage_utils.local_unit')
@patch('hooks.lib.swift_storage_utils.relation_ids', lambda *args: [])
@patch('hooks.lib.swift_storage_utils.KVStore')
@patch.object(uuid, 'uuid4', lambda: 'a-test-uuid')
def test_storage_joined_multi_device(self, mock_kvstore, mock_local_unit,
mock_environ):
mock_environ.get.side_effect = {'JUJU_ENV_UUID': uuid.uuid4()}
self.determine_block_devices.return_value = ['/dev/vdb', '/dev/vdc',
'/dev/vdd']
mock_local_unit.return_value = 'test/0'
kvstore = mock_kvstore.return_value
kvstore.__enter__.return_value = kvstore
kvstore.get.return_value = None
rel_settings = {}
def fake_kv_set(key, value):
rel_settings[key] = value
kvstore.set.side_effect = fake_kv_set
def fake_kv_get(key):
return rel_settings.get(key)
kvstore.get.side_effect = fake_kv_get
hooks.swift_storage_relation_joined()
self.relation_set.assert_called_with(
device='vdb:vdc:vdd', object_port=6000, account_port=6002,
zone=1, container_port=6001
env_uuid = mock_environ['JUJU_ENV_UUID']
devices = {"vdb@%s" % (env_uuid): {"status": "active",
"blkid": 'vdb-blkid-uuid'},
"vdd@%s" % (env_uuid): {"status": "active",
"blkid": 'vdd-blkid-uuid'},
"vdc@%s" % (env_uuid): {"status": "active",
"blkid": 'vdc-blkid-uuid'}}
kvstore.set.assert_called_with(
key='devices', value=json.dumps(devices)
)
@patch('hooks.lib.swift_storage_utils.get_device_blkid',
lambda dev: '%s-blkid-uuid' % os.path.basename(dev))
@patch.object(hooks.os, 'environ')
@patch('hooks.lib.swift_storage_utils.os.path.isdir', lambda *args: True)
@patch('hooks.lib.swift_storage_utils.local_unit')
@patch('hooks.lib.swift_storage_utils.relation_ids', lambda *args: [])
@patch('hooks.lib.swift_storage_utils.KVStore')
def test_storage_joined_dev_exists_unknown_juju_env_uuid(self,
mock_kvstore,
mock_local_unit,
mock_environ):
mock_environ.get.return_value = {'JUJU_ENV_UUID': uuid.uuid4()}
self.determine_block_devices.return_value = ['/dev/vdb', '/dev/vdc',
'/dev/vdd']
mock_local_unit.return_value = 'test/0'
kvstore = mock_kvstore.return_value
kvstore.__enter__.return_value = kvstore
kvstore.get.return_value = None
store = {'vdb@%s' % (uuid.uuid4()): {"status": "active",
"blkid": 'vdb-blkid-uuid'}}
def fake_kv_set(key, value):
store[key] = value
kvstore.set.side_effect = fake_kv_set
def fake_kv_get(key):
return store.get(key)
kvstore.get.side_effect = fake_kv_get
hooks.swift_storage_relation_joined()
env_uuid = mock_environ['JUJU_ENV_UUID']
devices = {"vdb@%s" % (env_uuid): {"status": "active",
"blkid": 'vdb-blkid-uuid'},
"vdd@%s" % (env_uuid): {"status": "active",
"blkid": 'vdd-blkid-uuid'},
"vdc@%s" % (env_uuid): {"status": "active",
"blkid": 'vdc-blkid-uuid'}}
kvstore.set.assert_called_with(
key='devices', value=json.dumps(devices)
)
@patch('sys.exit')


@@ -201,16 +201,24 @@ class SwiftStorageUtilsTests(CharmTestCase):
def test_mkfs_xfs(self):
swift_utils.mkfs_xfs('/dev/sdb')
self.check_call.assert_called_with(
['mkfs.xfs', '-i', 'size=1024', '/dev/sdb']
)
def test_mkfs_xfs_force(self):
swift_utils.mkfs_xfs('/dev/sdb', force=True)
self.check_call.assert_called_with(
['mkfs.xfs', '-f', '-i', 'size=1024', '/dev/sdb']
)
@patch.object(swift_utils, 'is_device_in_ring')
@patch.object(swift_utils, 'clean_storage')
@patch.object(swift_utils, 'mkfs_xfs')
@patch.object(swift_utils, 'determine_block_devices')
def test_setup_storage_no_overwrite(self, determine, mkfs, clean):
def test_setup_storage_no_overwrite(self, determine, mkfs, clean,
mock_is_device_in_ring):
mock_is_device_in_ring.return_value = False
determine.return_value = ['/dev/vdb']
self.test_config.set('overwrite', 'false')
swift_utils.setup_storage()
self.assertFalse(clean.called)
calls = [call(['chown', '-R', 'swift:swift', '/srv/node/']),
@@ -222,15 +230,20 @@ class SwiftStorageUtilsTests(CharmTestCase):
call('/srv/node/vdb', group='swift', owner='swift')
])
@patch.object(swift_utils, 'is_device_in_ring')
@patch.object(swift_utils, 'clean_storage')
@patch.object(swift_utils, 'mkfs_xfs')
@patch.object(swift_utils, 'determine_block_devices')
def test_setup_storage_overwrite(self, determine, mkfs, clean):
def test_setup_storage_overwrite(self, determine, mkfs, clean,
mock_is_device_in_ring):
self.test_config.set('overwrite', True)
mock_is_device_in_ring.return_value = False
self.is_mapped_loopback_device.return_value = None
determine.return_value = ['/dev/vdb']
self.test_config.set('overwrite', 'True')
swift_utils.setup_storage()
clean.assert_called_with('/dev/vdb')
self.mkdir.assert_called_with('/srv/node/vdb', owner='swift',
group='swift')
self.mount.assert_called_with('/dev/vdb', '/srv/node/vdb',
filesystem='xfs')
self.fstab_add.assert_called_with('/dev/vdb', '/srv/node/vdb',
@@ -359,9 +372,11 @@ class SwiftStorageUtilsTests(CharmTestCase):
for service in services:
self.assertIn(call(service), self.service_restart.call_args_list)
@patch.object(swift_utils, "is_device_in_ring")
@patch.object(swift_utils, "mkfs_xfs")
@patch.object(swift_utils, "determine_block_devices")
def test_setup_storage_img(self, determine, mkfs):
def test_setup_storage_img(self, determine, mkfs, mock_is_device_in_ring):
mock_is_device_in_ring.return_value = False
determine.return_value = ["/srv/test.img", ]
self.is_mapped_loopback_device.return_value = "/srv/test.img"
swift_utils.setup_storage()
@@ -376,9 +391,19 @@ class SwiftStorageUtilsTests(CharmTestCase):
'xfs',
options='loop, defaults'
)
self.mkdir.assert_has_calls([
call('/srv/node', owner='swift', group='swift',
perms=0o755),
call('/srv/node/test.img', group='swift', owner='swift')
])
@patch.object(swift_utils.subprocess, "check_output")
def test_get_device_blkid(self, mock_check_output):
dev = '/dev/vdb'
cmd = ['blkid', '-s', 'UUID', dev]
ret = '/dev/vdb: UUID="808bc298-0609-4619-aaef-ed7a5ab0ebb7" \n'
mock_check_output.return_value = ret
uuid = swift_utils.get_device_blkid(dev)
self.assertEquals(uuid, "808bc298-0609-4619-aaef-ed7a5ab0ebb7")
mock_check_output.assert_called_with(cmd)