Use new partitioning tools and remove unused partition code

Update data and deploy drivers to enable usage of new objects model and
new partitioning tools.
Remove a lot of old partition code that has become unused / never-reached code.

Change-Id: I9f01304632be28626abaf8fb322dd4fda0017589
This commit is contained in:
Dmitry Bogun 2016-12-26 17:37:12 +02:00 committed by Andrii Ostapenko
parent 9dfef0641a
commit 0d52a375cc
11 changed files with 185 additions and 2317 deletions

View File

@ -58,10 +58,6 @@ class PartitioningDataDriverMixin(object):
def partition_scheme(self):
    """Returns instance of PartitionScheme object"""
@abc.abstractproperty
def hw_partition_scheme(self):
    """Returns instance of PartitionScheme object"""
@six.add_metaclass(abc.ABCMeta)
class ProvisioningDataDriverMixin(object):

View File

@ -66,10 +66,6 @@ class GenericDataDriver(BaseDataDriver,
self._partition_scheme = self._get_partition_scheme()
return self._partition_scheme
@property
def hw_partition_scheme(self):
    # Intentionally unimplemented here: only data drivers that can read
    # the partitioning layout back from hardware override this property.
    raise NotImplementedError
@property
def image_scheme(self):
if not hasattr(self, '_image_scheme'):

View File

@ -14,11 +14,8 @@
# limitations under the License.
import collections
import fnmatch
import itertools
import json
import math
import os
from oslo_config import cfg
from oslo_log import log as logging
@ -27,8 +24,6 @@ from bareon.drivers.data.generic import GenericDataDriver
from bareon import errors
from bareon import objects
from bareon.utils import block_device
from bareon.utils import hardware as hu
from bareon.utils import partition as pu
from bareon.utils import utils
@ -36,26 +31,96 @@ LOG = logging.getLogger(__name__)
CONF = cfg.CONF
MiB = 2 ** 20
DEFAULT_LVM_META_SIZE = 64 * MiB
DEFAULT_GRUB_SIZE = 24 * MiB
class Ironic(GenericDataDriver):
loader_partition_size = block_device.SizeUnit(24, 'MiB')
multiboot_partition_size = block_device.SizeUnit(100, 'MiB')
_multiboot_claim = None
data_validation_schema = 'ironic.json'
_root_on_lvm = None
_boot_on_lvm = None
# satisfy abstractproperty
_partition_data = tuple()
def __init__(self, data):
super(Ironic, self).__init__(data)
self._original_data = data
convert_size(self.data['partitions'])
self.partitions_policy = self.data.get('partitions_policy', 'verify')
self.storage_claim = StorageParser(
self.data, self.image_scheme).storage
@property
def storage_claim(self):
return StorageParser(self._original_data, self.image_scheme).claim
self.fs_by_os = self._collect_fs_bindings()
self.is_multiboot = 1 < len(self.fs_by_os)
if self.is_multiboot:
self._multiboot_claim = self._handle_multiboot()
self.fs_by_mount = self._collect_fs_claims()
self.boot_on_lvm = self._check_is_boot_on_lvm()
self._handle_loader()
self._partition_scheme = DeprecatedPartitionSchemeBuilder(
self.storage_claim, self._multiboot_claim).schema
def _collect_fs_claims(self):
    """Group all file-system claims by their mount point.

    :returns: dict mapping mount point -> list of file-system claims
    """
    by_mount = collections.defaultdict(list)
    fs_claims = self.storage_claim.items_by_kind(
        objects.block_device.FileSystemMixin, recursion=True)
    for fs_claim in fs_claims:
        by_mount[fs_claim.mount].append(fs_claim)
    return dict(by_mount)
def _collect_fs_bindings(self):
    """Group all file-system claims by the OS id they are bound to.

    :returns: dict mapping os-binding id -> list of file-system claims
    """
    result = collections.defaultdict(list)
    for claim in self.storage_claim.items_by_kind(
            objects.block_device.FileSystemMixin, recursion=True):
        # A single claim may be bound to several operating systems.
        for bind in claim.os_binding:
            result[bind].append(claim)
    return dict(result)
def _check_is_boot_on_lvm(self):
    """Report whether boot files end up on an LVM logical volume.

    Checks '/' and '/boot' mounts; when no separate '/boot' file system
    is claimed, the boot files live on '/', so the root flag is used.
    """
    lvm_flags = [
        any(isinstance(claim, objects.block_device.LVMlv)
            for claim in self.fs_by_mount.get(mount, ()))
        for mount in ('/', '/boot')]
    root_on_lvm, boot_on_lvm = lvm_flags
    if not boot_on_lvm and '/boot' not in self.fs_by_mount:
        # No dedicated /boot: boot payload lands on the root file system.
        boot_on_lvm = root_on_lvm
    return boot_on_lvm
def _handle_multiboot(self):
    """Claim a service boot partition for multiboot deployments.

    The partition is added at the head of the first defined disk.

    :returns: the created boot partition claim
    :raises errors.WrongInputDataError: when no disk is defined
    """
    disk_claim = self.storage_claim.items_by_kind(
        objects.block_device.Disk)
    try:
        disk_claim = next(disk_claim)
    except StopIteration:
        # Fixed broken grammar in the user-facing error message.
        raise errors.WrongInputDataError(
            'There is no disk defined. The multiboot feature requires a '
            'disk to create the service boot partition.')
    size = block_device.SpaceClaim.new_by_sizeunit(
        self.multiboot_partition_size)
    boot_claim = objects.block_device.Partition(
        size, is_boot=True, is_service=True, file_system='ext4')
    # head=True: the boot partition must precede all user partitions.
    disk_claim.add(boot_claim, head=True)
    return boot_claim
def _handle_loader(self):
    """Reserve a boot-loader partition at the head of every disk.

    0xEF02 is the GPT type code for a "BIOS boot partition" (GRUB
    stage 1.5 area).
    """
    for disk_claim in self.storage_claim.items_by_kind(
            objects.block_device.Disk):
        size = block_device.SpaceClaim.new_by_sizeunit(
            self.loader_partition_size)
        claim = objects.block_device.Partition(
            size, guid_code=0xEF02, is_service=True)
        disk_claim.add(claim, head=True)
def _get_image_meta(self):
    # Intentionally a no-op in this driver.
    pass
@ -63,11 +128,11 @@ class Ironic(GenericDataDriver):
def _get_image_scheme(self):
LOG.debug('--- Preparing image schema ---')
data = self.data
image_schema = objects.ImageScheme()
image_scheme = objects.ImageScheme()
image_list = data['images']
deployment_flags = data.get('image_deploy_flags', {})
image_schema.images = [objects.Image(uri=image['image_pull_url'],
image_scheme.images = [objects.Image(uri=image['image_pull_url'],
target_device=image['target'],
format='bare',
container='raw',
@ -79,450 +144,10 @@ class Ironic(GenericDataDriver):
''),
deployment_flags=deployment_flags)
for image in image_list]
return image_schema
return image_scheme
def get_os_ids(self):
    """Return OS ids present in both the image and partition schemes."""
    image_ids = {image.os_id for image in self.image_scheme.images}
    fs_ids = {os_id
              for fs in self.partition_scheme.fss
              for os_id in fs.os_id}
    return image_ids & fs_ids
def get_image_ids(self):
    """Return the OS id of every image in the image scheme."""
    return [image.os_id for image in self.image_scheme.images]
@property
def is_multiboot(self):
    """True when more than one OS image is to be deployed."""
    # The comparison already yields a bool; the original
    # "True if ... else False" wrapper was redundant.
    return len(self.get_image_ids()) > 1
@property
def hw_partition_scheme(self):
    """Partition scheme read from the node hardware (lazily cached)."""
    if not hasattr(self, '_hw_partition_scheme'):
        self._hw_partition_scheme = self._get_hw_partition_schema()
    return self._hw_partition_scheme
@property
def root_on_lvm(self):
    # _root_on_lvm is populated while building the partition scheme, so
    # force the scheme build (via the property) before trusting the flag.
    return self.partition_scheme and self._root_on_lvm
@property
def boot_on_lvm(self):
    """LV path for /boot, or the root LV when /boot has no own fs."""
    no_separate_boot = (self.partition_scheme.fs_by_mount('/boot') is None)
    return ((no_separate_boot and self.root_on_lvm) or
            self._boot_on_lvm)
def _partition_data(self):
    """Return the raw 'partitions' section of the deploy config."""
    return self.data['partitions']
def _get_partition_scheme(self):
    """Reads disk/partitions volumes/vgs from given deploy_config

    Translating different ids (name, path, scsi) to name via
    scanning/comparing the underlying node hardware.

    :returns: objects.PartitionScheme built from self._ks_disks/_ks_vgs
    """
    LOG.debug('--- Preparing partition scheme ---')
    LOG.debug('Looping over all disks in provision data')
    multiboot_installed = False
    partition_schema = objects.PartitionScheme()
    for disk in self._ks_disks:
        # # skipping disk if there are no volumes with size >0
        # # to be allocated on it which are not boot partitions
        if all((v["size"] <= 0 for v in disk["volumes"] if
                v.get("mount") != "/boot")):
            continue
        LOG.debug('Processing disk type:%s id:%s' % (
            disk['id']['type'], disk['id']['value']))
        LOG.debug('Adding gpt table on disk type:%s id:%s' % (
            disk['id']['type'], disk['id']['value']))
        parted = partition_schema.add_parted(
            name=self._disk_dev(disk), label='gpt', disk_size=disk['size'])
        # TODO(lobur): do not add partitions implicitly, they may fail
        # partition verification
        parted.add_partition(size=DEFAULT_GRUB_SIZE, flags=['bios_grub'])
        # Only the first processed disk receives the shared multiboot
        # partition; the flag prevents duplicates on later disks.
        if self.is_multiboot and not multiboot_installed:
            multiboot_installed = True
            multiboot_partition = parted.add_partition(size=100 * MiB)
            partition_schema.add_fs(device=multiboot_partition.name,
                                    mount='multiboot', fs_type='ext4',
                                    fstab_enabled=False, os_id=[])
        LOG.debug('Looping over all volumes on disk type:%s id:%s' % (
            disk['id']['type'], disk['id']['value']))
        for volume in disk['volumes']:
            LOG.debug('Processing volume: '
                      'name=%s type=%s size=%s mount=%s vg=%s '
                      'keep_data=%s' %
                      (volume.get('name'), volume.get('type'),
                       volume.get('size'), volume.get('mount'),
                       volume.get('vg'), volume.get('keep_data')))
            if volume['size'] <= 0:
                LOG.debug('Volume size is zero. Skipping.')
                continue
            # Dispatch by volume type; an unknown type raises KeyError.
            FUNC_MAP = {
                'partition': self._process_partition,
                'raid': self._process_raid,
                'pv': self._process_pv
            }
            FUNC_MAP[volume['type']](volume, disk, parted,
                                     partition_schema)
    LOG.debug('Looping over all volume groups in provision data')
    for vg in self._ks_vgs:
        self._process_vg(vg, partition_schema)
    partition_schema.elevate_keep_data()
    return partition_schema
def _process_partition(self, volume, disk, parted, partition_schema):
    """Create one plain partition (and its fs, when mounted)."""
    partition = self._add_partition(volume, disk, parted)
    if 'partition_guid' in volume:
        LOG.debug('Setting partition GUID: %s' %
                  volume['partition_guid'])
        partition.set_guid(volume['partition_guid'])
    if 'mount' in volume and volume['mount'] != 'none':
        LOG.debug('Adding file system on partition: '
                  'mount=%s type=%s' %
                  (volume['mount'],
                   volume.get('file_system', 'xfs')))
        partition_schema.add_fs(
            device=partition.name, mount=volume['mount'],
            fs_type=volume.get('file_system', 'xfs'),
            fs_label=self._getlabel(volume.get('disk_label')),
            fstab_options=volume.get('fstab_options', 'defaults'),
            fstab_enabled=volume.get('fstab_enabled', True),
            os_id=volume.get('images', self.get_image_ids()[:1]),
        )
        parted.install_bootloader = True
        # NOTE(review): _boot_done appears to only record that a /boot fs
        # was created; no other action is taken here — confirm intent.
        if volume['mount'] == '/boot' and not self._boot_done:
            self._boot_done = True
def _process_pv(self, volume, disk, parted, partition_schema):
    """Create a partition and attach it to a vg as a physical volume."""
    partition = self._add_partition(volume, disk, parted)
    LOG.debug('Creating pv on partition: pv=%s vg=%s' %
              (partition.name, volume['vg']))
    lvm_meta_size = volume.get('lvm_meta_size', DEFAULT_LVM_META_SIZE)
    # Work in MiB from here on.
    lvm_meta_size = utils.B2MiB(lvm_meta_size)
    # The reason for that is to make sure that
    # there will be enough space for creating logical volumes.
    # Default lvm extension size is 4M. Nailgun volume
    # manager does not care of it and if physical volume size
    # is 4M * N + 3M and lvm metadata size is 4M * L then only
    # 4M * (N-L) + 3M of space will be available for
    # creating logical extensions. So only 4M * (N-L) of space
    # will be available for logical volumes, while nailgun
    # volume manager might reguire 4M * (N-L) + 3M
    # logical volume. Besides, parted aligns partitions
    # according to its own algorithm and actual partition might
    # be a bit smaller than integer number of mebibytes.
    if lvm_meta_size < 10:
        raise errors.WrongPartitionSchemeError(
            'Error while creating physical volume: '
            'lvm metadata size is too small')
    # Split the metadata budget across the two copies, keeping an
    # 8 MiB safety margin (see the note above).
    metadatasize = int(math.floor((lvm_meta_size - 8) / 2))
    metadatacopies = 2
    partition_schema.vg_attach_by_name(
        pvname=partition.name, vgname=volume['vg'],
        metadatasize=metadatasize,
        metadatacopies=metadatacopies)
def _process_raid(self, volume, disk, parted, partition_schema):
    """Create a partition and attach it to a software RAID.

    A RAID volume mounted at '/boot' is treated as a plain file system
    (one per deployment) instead of being attached to an md device.
    """
    partition = self._add_partition(volume, disk, parted)
    # _add_partition may skip /boot partitions on some disks.
    if not partition:
        return
    if 'mount' in volume and volume['mount'] not in ('none', '/boot'):
        LOG.debug('Attaching partition to RAID '
                  'by its mount point %s' % volume['mount'])
        partition_schema.md_attach_by_mount(
            device=partition.name, mount=volume['mount'],
            fs_type=volume.get('file_system', 'xfs'),
            fs_label=self._getlabel(volume.get('disk_label')))
    if 'mount' in volume and volume['mount'] == '/boot' and \
            not self._boot_done:
        LOG.debug('Adding file system on partition: '
                  'mount=%s type=%s' %
                  (volume['mount'],
                   volume.get('file_system', 'ext2')))
        partition_schema.add_fs(
            device=partition.name, mount=volume['mount'],
            fs_type=volume.get('file_system', 'ext2'),
            fs_label=self._getlabel(volume.get('disk_label')),
            fstab_options=volume.get('fstab_options', 'defaults'),
            fstab_enabled=volume.get('fstab_enabled', True),
            os_id=volume.get('images', self.get_image_ids()[:1]),
        )
        parted.install_bootloader = True
        self._boot_done = True
def _process_vg(self, volume_group, partition_schema):
    """Create logical volumes (and their file systems) for one vg.

    Also records the LV path of '/' and '/boot' for the
    root_on_lvm / boot_on_lvm properties.
    """
    LOG.debug('Processing vg %s' % volume_group['id'])
    LOG.debug(
        'Looping over all logical volumes in vg %s' % volume_group['id'])
    for volume in volume_group['volumes']:
        LOG.debug('Processing lv %s' % volume['name'])
        if volume['size'] <= 0:
            LOG.debug('LogicalVolume size is zero. Skipping.')
            continue
        if volume['type'] == 'lv':
            LOG.debug('Adding lv to vg %s: name=%s, size=%s' %
                      (volume_group['id'], volume['name'], volume['size']))
            lv = partition_schema.add_lv(name=volume['name'],
                                         vgname=volume_group['id'],
                                         size=volume['size'])
            if 'mount' in volume and volume['mount'] != 'none':
                LOG.debug('Adding file system on lv: '
                          'mount=%s type=%s' %
                          (volume['mount'],
                           volume.get('file_system', 'xfs')))
                partition_schema.add_fs(
                    device=lv.device_name,
                    mount=volume['mount'],
                    fs_type=volume.get('file_system', 'xfs'),
                    fs_label=self._getlabel(volume.get('disk_label')),
                    fstab_options=volume.get('fstab_options',
                                             'defaults'),
                    fstab_enabled=volume.get('fstab_enabled', True),
                    os_id=volume.get('images', self.get_image_ids()[:1]),
                )
                lv_path = "%s/%s" % (volume_group['id'], volume['name'])
                if volume['mount'] == '/':
                    self._root_on_lvm = lv_path
                elif volume['mount'] == '/boot':
                    self._boot_on_lvm = lv_path
def _add_partition(self, volume, disk, parted):
    """Add a partition for *volume* on *disk*.

    :returns: the created partition, or None when the /boot partition
        is skipped for this disk (it is created only once and, when
        possible, on a disk small enough for buggy firmware).
    """
    partition = None
    if volume.get('mount') != '/boot':
        LOG.debug('Adding partition on disk %s: size=%s' % (
            disk['id']['value'], volume['size']))
        partition = parted.add_partition(
            size=volume['size'],
            keep_data=self._get_keep_data_flag(volume))
        LOG.debug('Partition name: %s' % partition.name)
    elif volume.get('mount') == '/boot' \
            and not self._boot_partition_done \
            and (disk in self._small_ks_disks or not self._small_ks_disks):
        # NOTE(kozhukalov): On some hardware GRUB is not able
        # to see disks larger than 2T due to firmware bugs,
        # so we'd better avoid placing /boot on such
        # huge disks if it is possible.
        LOG.debug('Adding /boot partition on disk %s: '
                  'size=%s', disk['id']['value'], volume['size'])
        partition = parted.add_partition(
            size=volume['size'],
            keep_data=self._get_keep_data_flag(volume))
        LOG.debug('Partition name: %s', partition.name)
        self._boot_partition_done = True
    else:
        LOG.debug('No need to create partition on disk %s. '
                  'Skipping.', disk['id']['value'])
    return partition
def _get_keep_data_flag(self, volume):
    """Return the keep_data (preserve) flag for *volume*."""
    # For the new policy-based driver the default is True
    return volume.get('keep_data', True)
def _get_hw_partition_schema(self):
    """Reads disks/partitions from underlying hardware.

    Does not rely on deploy_config

    :returns: objects.PartitionScheme reflecting the on-disk layout
    """
    # NOTE(lobur): Only disks/partitions currently supported.
    # No vgs/volumes
    LOG.debug('--- Reading HW partition scheme from the node ---')
    fstab = self._find_hw_fstab()
    LOG.debug('Scanning all disks on the node')
    partition_schema = objects.PartitionScheme()
    for dev in self.hu_disks:
        disk_info = pu.scan_device(dev['name'])
        disk_meta = disk_info['generic']
        parted = partition_schema.add_parted(
            name=disk_meta['dev'],
            label=disk_meta['table'],
            install_bootloader=disk_meta['has_bootloader']
        )
        LOG.debug('Scanning all partitions on disk %s '
                  % disk_meta['dev'])
        for part in disk_info['parts']:
            if part['fstype'] == 'free':
                LOG.debug('Skipping a free partition at:'
                          'begin=%s, end=%s' %
                          (part['begin'], part['end']))
                continue
            LOG.debug('Adding partition: '
                      'name=%s size=%s to hw schema' %
                      (part['master_dev'], part['size']))
            # NOTE(lobur): avoid use of parted.add_partition to omit
            # counting logic; use real data instead.
            partition = objects.Partition(
                name=part.get('name'),
                count=part.get('num'),
                device=part.get('master_dev'),
                begin=part.get('begin'),
                end=part.get('end'),
                partition_type=part.get('type'),
                flags=part.get('flags')
            )
            parted.partitions.append(partition)
            mnt_point = self._get_mount_point_from_fstab(fstab,
                                                         part['uuid'])
            if mnt_point:
                LOG.debug('Adding filesystem: '
                          'device=%s fs_type=%s mount_point=%s '
                          'to hw schema' %
                          (part.get('name'), part.get('fstype'),
                           mnt_point))
                partition_schema.add_fs(device=part.get('name'),
                                        mount=mnt_point,
                                        fs_type=part.get('fstype', ''))
            else:
                LOG.warning("Not adding %s filesystem to hw_schema because"
                            " it has no mount point in fstab"
                            % part.get('name'))
    return partition_schema
def _find_hw_fstab(self):
    """Mount every claimed root fs and read its /etc/fstab.

    :returns: concatenated fstab contents, one block per root fs
    :raises errors.HardwarePartitionSchemeCannotBeReadError: when the
        mount / read of any fstab fails
    """
    mount_dir = '/mnt'
    fstabs = []
    # Only file systems mounted at '/' can carry an fstab.
    fstab_fss = filter(lambda fss: fss.mount == '/',
                       self.partition_scheme.fss)
    for fss in fstab_fss:
        fss_dev = fss.device
        fstab_path = os.path.join(mount_dir, 'etc', 'fstab')
        try:
            utils.execute('mount', fss_dev, mount_dir, run_as_root=True,
                          check_exit_code=[0])
            fstab, _ = utils.execute('cat', fstab_path,
                                     run_as_root=True,
                                     check_exit_code=[0])
            utils.execute('umount', mount_dir, run_as_root=True)
        except errors.ProcessExecutionError as e:
            raise errors.HardwarePartitionSchemeCannotBeReadError(
                "Cannot read fstab from %s partition. Error occurred: %s"
                % (fss_dev, str(e))
            )
        LOG.info("fstab has been found on %s:\n%s" % (fss_dev, fstab))
        fstabs.append(fstab)
    return '\n'.join(fstabs)
def _get_mount_point_from_fstab(self, fstab, part_uuid):
    """Find the mount point of the partition with *part_uuid* in fstab.

    :returns: the mount point string, or None when not found or when
        *part_uuid* is empty
    """
    if not part_uuid:
        return None
    # TODO(lobur): handle fstab written using not partition UUID
    for record in fstab.splitlines():
        if part_uuid in record:
            return record.split()[1]
    return None
def _match_device(self, hu_disk, ks_disk):
    """Check if hu_disk and ks_disk are the same device

    Tries to figure out if hu_disk got from hu.list_block_devices
    and ks_spaces_disk given correspond to the same disk device. This
    is the simplified version of hu.match_device

    :param hu_disk: A dict representing disk device how
                    it is given by list_block_devices method.
    :param ks_disk: A dict representing disk device according to
                    ks_spaces format.
    :returns: True if hu_disk matches ks_spaces_disk else False.
    """
    id_type = ks_disk['id']['type']
    id_value = ks_disk['id']['value']
    # The hardware side may expose several candidate values (e.g. a
    # list of udev links) for the same id type.
    if isinstance(hu_disk.get(id_type), (list, tuple)):
        return any((id_value in value for value in hu_disk[id_type]))
    else:
        return id_value in hu_disk[id_type]
@property
def hu_disks(self):
    """Actual disks which are available on this node

    It is a list of dicts which are formatted other way than
    ks_spaces disks. To match both of those formats use
    _match_device method. Result is cached on first access.
    """
    if not getattr(self, '_hu_disks', None):
        self._hu_disks = self._get_block_devices()
    return self._hu_disks
@property
def hu_vgs(self):
    """Actual LVM vg devices which are available on this node

    It is a list of dicts which are formatted other way than
    ks_spaces disks. To match both of those formats use
    _match_data_by_pattern method. Result is cached on first access.
    """
    if not getattr(self, '_hu_vgs', None):
        self._hu_vgs = self._get_vg_devices()
    return self._hu_vgs
def _get_vg_devices(self):
    """Collect device info for every LVM vg device in the udev DB."""
    return [self._get_block_device_info(device)
            for device in hu.get_vg_devices_from_udev_db()]
def _get_block_devices(self):
    """Collect extended info for every block device in the udev DB."""
    # Extends original result of hu.get_device_info with hu.get_device_ids
    # and add scsi param.
    return [self._get_block_device_info(device)
            for device in hu.get_block_devices_from_udev_db()]
def _get_block_device_info(self, device):
    """Collect name/scsi/path/udev info for a single block device.

    Falsy values are stripped from the result.
    """
    device_info = {
        'name': device,
        'scsi': hu.scsi_address(device)
    }
    hu_device_info = hu.get_device_info(device)
    if hu_device_info:
        device_info.update(hu_device_info)
    ids = hu.get_device_ids(device)
    if not ids:
        # DEVLINKS not presented on virtual environment.
        # Let's keep it here for development purpose.
        devpath = device_info.get('uspec', {}).get('DEVPATH')
        if devpath:
            ids = [devpath]
    device_info['path'] = ids
    # NOTE(review): iteritems() is Python 2 only.
    return {k: v for k, v in device_info.iteritems() if v}
return tuple(self.fs_by_os)
def _get_grub(self):
LOG.debug('--- Parse grub settings ---')
@ -541,67 +166,6 @@ class Ironic(GenericDataDriver):
grub.kernel_params = kernel_params
return grub
def _disk_dev(self, ks_disk):
    """Resolve *ks_disk* to the single matching hardware device name.

    :raises errors.DiskNotFoundError: when zero or multiple devices match
    """
    # first we try to find a device that matches ks_disk
    # comparing by-id and by-path links
    matched = [hu_disk['name'] for hu_disk in self.hu_disks
               if self._match_device(hu_disk, ks_disk)]
    # if we can not find a device by its by-id and by-path links
    if not matched or len(matched) > 1:
        raise errors.DiskNotFoundError(
            'Disk not found with %s: %s' % (
                ks_disk['id']['type'], ks_disk['id']['value']))
    return matched[0]
def _disk_vg_dev(self, ks_vgs):
    """Resolve *ks_vgs* to the single matching vg device name.

    :raises errors.DiskNotFoundError: when zero or multiple devices match
    """
    # first we try to find a device that matches ks_disk
    # comparing by-id and by-path links
    matched = [hu_vg['name'] for hu_vg in self.hu_vgs
               if self._match_data_by_pattern(hu_vg, ks_vgs)]
    # if we can not find a device by its by-id and by-path links
    if not matched or len(matched) > 1:
        raise errors.DiskNotFoundError(
            'Disk not found with %s: %s' % (
                ks_vgs['id']['type'], ks_vgs['id']['value']))
    return matched[0]
def _get_device_ids(self, dev_type):
    """Collect udev ids for every device of *dev_type*.

    :param dev_type: hu.DISK or hu.PARTITION
    :raises ValueError: for any other *dev_type* (previously this
        surfaced as an obscure NameError on the unbound ``devs``)
    """
    if dev_type == hu.DISK:
        devs = hu.get_block_devices_from_udev_db()
    elif dev_type == hu.PARTITION:
        devs = hu.get_partitions_from_udev_db()
    else:
        raise ValueError('Unsupported device type: %r' % (dev_type,))
    device_ids = []
    for dev in devs:
        ids = hu.get_device_ids(dev)
        if ids:
            device_ids.append(ids)
    return device_ids
@property
def hu_partitions(self):
    """Udev ids of all partitions on the node (lazily cached)."""
    if not getattr(self, '_hu_partitions', None):
        self._hu_partitions = self._get_device_ids(dev_type=hu.PARTITION)
    return self._hu_partitions
def _disk_partition(self, ks_partition):
    """Resolve *ks_partition* to the single matching partition name.

    :raises errors.DiskNotFoundError: when zero or multiple match
    """
    matched = [hu_partition['name'] for hu_partition in self.hu_partitions
               if self._match_data_by_pattern(hu_partition, ks_partition)]
    if not matched or len(matched) > 1:
        raise errors.DiskNotFoundError(
            'Disk not found with %s: %s' % (
                ks_partition['id']['type'], ks_partition['id']['value']))
    return matched[0]
def _match_data_by_pattern(self, hu_data, ks_data):
    """Match hardware data against the glob pattern in *ks_data* id."""
    id_type = ks_data['id']['type']
    pattern = ks_data['id']['value']
    candidates = hu_data.get(id_type)
    if isinstance(candidates, (list, tuple)):
        # Several candidate values (e.g. udev links): any match wins.
        return any(fnmatch.fnmatch(value, pattern)
                   for value in candidates)
    return fnmatch.fnmatch(hu_data.get(id_type, ''), pattern)
@classmethod
def validate_data(cls, data):
super(Ironic, cls).validate_data(data)
@ -615,147 +179,12 @@ class Ironic(GenericDataDriver):
'disk.')
def convert_size(data):
    """Normalize all size values in *data* to bytes and resolve them."""
    data = convert_string_sizes(data, target='B')
    data = _resolve_all_sizes(data)
    return data
def _resolve_all_sizes(data):
    """Resolve concrete sizes for all disks and vgs in *data*."""
    # NOTE(oberezovskyi): "disks" should be processed before "vgs",
    # because vg sizes are derived from the pv sizes on the disks.
    disks = [space for space in data if space['type'] == 'disk']
    disks = _resolve_sizes(disks)
    vgs = [space for space in data if space['type'] == 'vg']
    _set_vg_sizes(vgs, disks)
    vgs = _resolve_sizes(vgs, retain_space_size=False)
    return disks + vgs
def _set_vg_sizes(vgs, disks):
    """Derive each vg size from its member pvs (minus lvm metadata)."""
    vg_sizes = collections.defaultdict(int)
    for disk in disks:
        for volume in disk['volumes']:
            if volume['type'] != 'pv':
                continue
            meta = volume.get('lvm_meta_size', DEFAULT_LVM_META_SIZE)
            vg_sizes[volume['vg']] += volume['size'] - meta
    for vg in vgs:
        vg['size'] = vg_sizes[vg['id']]
def _convert_percentage_sizes(space, size):
    """Replace percentage volume sizes with absolute values in place."""
    for volume in space['volumes']:
        # NOTE(review): basestring is Python 2 only.
        if isinstance(volume['size'], basestring) and '%' in volume['size']:
            # NOTE(lobur): decimal results of % conversion are floored.
            volume['size'] = size * int(volume['size'].split('%')[0]) // 100
def _get_disk_id(disk):
if isinstance(disk['id'], dict):
return '{}: {}'.format(disk['id']['type'],
disk['id']['value'])
return disk['id']
def _get_space_size(space, retain_size):
if not space.get('size'):
raise ValueError('Size of {type} "{id}" is not '
'specified'.format(type=space['type'],
id=_get_disk_id(space)))
return space['size'] if retain_size else space.pop('size')
def _process_space_claims(space):
    """Sum explicit volume sizes and find the 'remaining' volume.

    :returns: (claimed_space, unsized_volume) where unsized_volume is
        the single volume claiming 'remaining' size, or None
    :raises ValueError: when several volumes claim 'remaining'
    """
    claimed_space = 0
    unsized_volume = None
    for volume in space['volumes']:
        # NOTE(review): basestring is Python 2 only.
        if (isinstance(volume['size'], basestring) and
                volume['size'] == 'remaining'):
            if not unsized_volume:
                unsized_volume = volume
            else:
                raise ValueError('Detected multiple volumes attempting to '
                                 'claim remaining size {type} "{id}"'
                                 ''.format(type=space['type'],
                                           id=_get_disk_id(space)))
        else:
            claimed_space += volume['size']
    return claimed_space, unsized_volume
def _resolve_sizes(spaces, retain_space_size=True):
    """Resolve percentage and 'remaining' sizes for every space.

    Mutates the volumes in place and validates that claims fit into
    the available space.

    :param retain_space_size: when False, 'size' is popped from each space
    :returns: the (mutated) list of spaces
    :raises ValueError: on over-allocation or an unsatisfiable
        'remaining' claim
    """
    for space in spaces:
        space_size = _get_space_size(space, retain_space_size)
        # NOTE(oberezovskyi): DEFAULT_GRUB_SIZE is size of grub stage 1.5
        # (bios_grub) partition
        taken_space = DEFAULT_GRUB_SIZE if space['type'] == 'disk' else 0
        _convert_percentage_sizes(space, space_size)
        claimed_space, unsized_volume = _process_space_claims(space)
        taken_space += claimed_space
        delta = space_size - taken_space
        delta_MiB = utils.B2MiB(abs(delta))
        if delta < 0:
            raise ValueError('Sum of requested filesystem sizes exceeds space '
                             'available on {type} "{id}" by {delta} '
                             'MiB'.format(delta=delta_MiB, type=space['type'],
                                          id=_get_disk_id(space)))
        elif unsized_volume:
            # Give whatever is left to the single 'remaining' volume.
            ref = (unsized_volume['mount'] if unsized_volume.get(
                'mount') else unsized_volume.get('pv'))
            if delta:
                LOG.info('Claiming remaining {delta} MiB for {ref} '
                         'volume/partition on {type} {id}.'
                         ''.format(delta=delta_MiB,
                                   type=space['type'],
                                   id=_get_disk_id(space),
                                   ref=ref))
                unsized_volume['size'] = delta
            else:
                raise ValueError(
                    'Volume/partition {ref} requested all remaining space, '
                    'but no unclaimed space remains on {type} {id}'.format(
                        type=space['type'],
                        id=_get_disk_id(space),
                        ref=ref))
        else:
            LOG.info('{delta} MiB of unclaimed space remains on {type} "{id}" '
                     'after completing allocations.'.format(delta=delta_MiB,
                                                            type=space['type'],
                                                            id=_get_disk_id(
                                                                space)))
    return spaces
def convert_string_sizes(data, target=None):
    """Recursively convert human-readable sizes ('10G') to numbers.

    Percentage and 'remaining' values are left untouched; they are
    resolved later by _resolve_sizes.

    :param target: unit passed through to utils.human2bytes, or None
    """
    if target is not None:
        conv_args = {'target': target}
    else:
        conv_args = {}
    if isinstance(data, (list, tuple)):
        return [convert_string_sizes(el, target=target) for el in data]
    if isinstance(data, dict):
        for k, v in data.items():
            # NOTE(review): basestring is Python 2 only.
            if (isinstance(v, basestring) and
                    any(x in v for x in ('%', 'remaining'))):
                continue
            if k in ('size', 'lvm_meta_size'):
                data[k] = utils.human2bytes(v, **conv_args)
            else:
                data[k] = convert_string_sizes(v, target=target)
    return data
class StorageParser(object):
def __init__(self, data, image_schema):
def __init__(self, data, image_scheme):
self.storage = objects.block_device.StorageSubsystem()
self.disk_finder = block_device.DeviceFinder()
operation_systems = self._collect_operation_systems(image_schema)
operation_systems = self._collect_operation_systems(image_scheme)
self._existing_os_binding = set(operation_systems)
self._default_os_binding = operation_systems[:1]
@ -774,8 +203,8 @@ class StorageParser(object):
self._assemble_mdraid()
self._validate()
def _collect_operation_systems(self, image_schema):
return [image.os_id for image in image_schema.images]
def _collect_operation_systems(self, image_scheme):
return [image.os_id for image in image_scheme.images]
def _parse(self, data):
for raw in data['partitions']:
@ -1011,12 +440,12 @@ class StorageParser(object):
return idnr
class DeprecatedPartitionSchemaBuilder(object):
class DeprecatedPartitionSchemeBuilder(object):
def __init__(self, storage_claim, multiboot_partition):
self.storage_claim = storage_claim
self.multiboot_partition = multiboot_partition
self.schema = objects.PartitionSchema()
self.schema = objects.PartitionScheme()
self._convert()
@ -1032,7 +461,7 @@ class DeprecatedPartitionSchemaBuilder(object):
def _convert_disk(self, disk):
old_disk = self.schema.add_parted(
name=disk.dev, label='gpt', install_bootloader=True,
size=self._unpack_size(disk.size).bytes)
disk_size=self._unpack_size(disk.size).bytes)
for claim in disk.items:
args = {}

View File

@ -11,6 +11,11 @@
"type": "array",
"items": {
"type": "object",
"required": [
"name",
"image_pull_url",
"target"
],
"properties": {
"image_name": {
"type": "string"

View File

@ -89,7 +89,22 @@ class GenericDeployDriver(BaseDeployDriver, mixins.MountableMixin):
def do_partitioning(self):
LOG.debug('--- Partitioning disks (do_partitioning) ---')
PolicyPartitioner(self.driver).partition()
try:
storage_claim = self.driver.storage_claim
except AttributeError:
# TODO(dbogun): completely replace deprecated partitioning code
PolicyPartitioner(self.driver).partition()
else:
handlers_map = {
'clean': PartitionPolicyClean,
'verify': PartitionPolicyVerify,
'nailgun_legacy': PartitionPolicyNailgun}
handler = handlers_map[self.driver.partitions_policy]
handler = handler(self, storage_claim)
handler()
LOG.debug('--- Partitioning disks END (do_partitioning) ---')
def do_configdrive(self):
@ -304,160 +319,21 @@ class PolicyPartitioner(object):
% self.driver.partitions_policy)
policy_handlers = {
"verify": self._handle_verify,
"clean": self._handle_clean,
"nailgun_legacy": self._handle_nailgun_legacy,
}
known_policies = policy_handlers.keys()
if policy not in known_policies:
if policy not in policy_handlers:
raise errors.WrongPartitionPolicyError(
"'%s' policy is not one of known ones: %s"
% (policy, known_policies))
% (policy, ', '.join(policy_handlers)))
policy_handlers[policy]()
def _handle_verify(self):
    """Verify the requested scheme matches the on-disk layout, then
    clean the file systems (data on disk is otherwise preserved)."""
    provision_schema = self.driver.partition_scheme.to_dict()
    hw_schema = self.driver.hw_partition_scheme.to_dict()
    PartitionSchemaCompareTool().assert_no_diff(provision_schema,
                                                hw_schema)
    self.partitioning._do_clean_filesystems()
@staticmethod
def _verify_disk_size(parteds, hu_disks):
    """Check each requested scheme fits on its physical disk.

    :raises errors.DiskNotFoundError: when a scheme disk has no
        physical counterpart
    :raises ValueError: when the physical size cannot be read
    :raises errors.NotEnoughSpaceError: when the scheme is too big
    """
    for parted in parteds:
        disks = [d for d in hu_disks if d.get('name') == parted.name]
        if not disks:
            raise errors.DiskNotFoundError(
                'No physical disks found matching: %s' % parted.name)
        try:
            disk_size_bytes = disks[0]['bspec']['size64']
            disk_size_bytes = int(disk_size_bytes)
        except (KeyError, IndexError, ValueError):
            raise ValueError('Cannot read size of the disk: %s'
                             % disks[0].get('name'))
        # It's safer to understate the physical disk size
        # NOTE(review): comparison uses parted.size but the error
        # message reports parted.disk_size — confirm which attribute
        # is the intended scheme size.
        if parted.size > disk_size_bytes:
            raise errors.NotEnoughSpaceError(
                'Partition scheme for: %(disk)s exceeds the size of the '
                'disk. Scheme size is %(scheme_size)s, and disk size '
                'is %(disk_size)s.' % {
                    'disk': parted.name, 'scheme_size': parted.disk_size,
                    'disk_size': disk_size_bytes})
def _handle_clean(self):
    """Verify disk sizes, then (re)partition from scratch."""
    self._verify_disk_size(self.driver.partition_scheme.parteds,
                           self.driver.hu_disks)
    self.partitioning._do_partitioning()
def _handle_nailgun_legacy(self):
    # Corresponds to nailgun behavior.
    self.partitioning.execute()
class PartitionSchemaCompareTool(object):
    """Compares a user-requested partition schema with the one read
    from hardware, normalizing both sides before the comparison."""

    def assert_no_diff(self, user_schema, hw_schema):
        """Raise PartitionSchemeMismatchError unless schemas match."""
        usr_sch = self._prepare_user_schema(user_schema, hw_schema)
        hw_sch = self._prepare_hw_schema(user_schema, hw_schema)
        # NOTE(lobur): this may not work on bm hardware: because of the
        # partition alignments sizes may not match precisely, so need to
        # write own diff tool
        if not usr_sch == hw_sch:
            diff_str = utils.dict_diff(usr_sch, hw_sch,
                                       "user_schema", "hw_schema")
            raise errors.PartitionSchemeMismatchError(diff_str)
        LOG.debug("hw_schema and user_schema matched")

    def _prepare_user_schema(self, user_schema, hw_schema):
        """Normalize the user schema (in place) for comparison."""
        LOG.debug('Preparing user_schema for verification:\n%s' %
                  user_schema)
        # Set all keep_data (preserve) flags to false.
        # They are just instructions to deploy driver and do not stored on
        # resulting partitions, so we have no means to read them from
        # hw_schema
        for fs in user_schema['fss']:
            fs['keep_data'] = False
            fs['os_id'] = []
        for parted in user_schema['parteds']:
            for part in parted['partitions']:
                part['keep_data'] = False
        self._begin_end_into_size(user_schema)
        LOG.debug('Prepared user_schema is:\n%s' % user_schema)
        return user_schema

    def _prepare_hw_schema(self, user_schema, hw_schema):
        """Normalize the hw schema (in place): drop disks/file systems
        not mentioned in the user schema and unify fs type names."""
        LOG.debug('Preparing hw_schema to verification:\n%s' %
                  hw_schema)
        user_disks = [p['name'] for p in user_schema['parteds']]
        # Ignore disks which are not mentioned in user_schema
        filtered_disks = []
        for disk in hw_schema['parteds']:
            if disk['name'] in user_disks:
                filtered_disks.append(disk)
            else:
                LOG.info("Node disk '%s' is not mentioned in deploy_config"
                         " thus it will be skipped." % disk['name'])
        hw_schema['parteds'] = filtered_disks
        # Ignore filesystems that belong to disk not mentioned in user_schema
        filtered_fss = []
        for fs in hw_schema['fss']:
            # rstrip digits: map a partition device back to its disk.
            if fs['device'].rstrip("0123456789") in user_disks:
                filtered_fss.append(fs)
            else:
                LOG.info("Node filesystem '%s' belongs to disk not mentioned"
                         " in deploy_config thus it will be skipped."
                         % fs['device'])
        hw_schema['fss'] = filtered_fss
        # Transform filesystem types
        for fs in hw_schema['fss']:
            fs['fs_type'] = self._transform_fs_type(fs['fs_type'])
            fs['os_id'] = []
        self._begin_end_into_size(hw_schema)
        LOG.debug('Prepared hw_schema is:\n%s' % hw_schema)
        return hw_schema

    def _transform_fs_type(self, hw_fs_type):
        """Map a hardware-reported fs type name to the user-schema name."""
        # hw fstype name pattern -> fstype name in user schema
        hw_fs_to_user_fs_map = {
            'linux-swap': 'swap'
        }
        # NOTE(review): iteritems() is Python 2 only.
        for hw_fs_pattern, usr_schema_val in hw_fs_to_user_fs_map.iteritems():
            if hw_fs_pattern in hw_fs_type:
                LOG.info("Node fs type '%s' is transformed to the user "
                         "schema type as '%s'."
                         % (hw_fs_type, usr_schema_val))
                return usr_schema_val
        return hw_fs_type

    @staticmethod
    def _begin_end_into_size(schema):
        """Replace per-partition begin/end offsets with a size (in place)."""
        # We can't rely on ("begin", "end") fields created from user request.
        # Because they don't take in account reserved zones added by partition
        # schema.
        # Order plus size plus type should be strict enough for our check.
        for disk in schema['parteds']:
            disk.pop('size', None)
            for p in disk['partitions']:
                if 'size' not in p:
                    p['size'] = p['end'] - p['begin']
                del p['begin']
                del p['end']
@six.add_metaclass(abc.ABCMeta)
class AbstractPartitionPolicy(object):
space_allocation_accuracy = block_device.SizeUnit(1, 'MiB')
@ -564,7 +440,7 @@ class AbstractPartitionPolicy(object):
claim.payload.guid = actual.payload.guid
continue
raise errors.PartitionSchemaMismatchError(
raise errors.PartitionSchemeMismatchError(
'Unable to resolv claim devices into physical devices. '
'Claim and physical devices partitions are different. '
'(dev={}, {}: {!r} != {!r})'.format(
@ -738,7 +614,7 @@ class PartitionPolicyVerify(AbstractPartitionPolicy):
desired_partition = self._grab_storage_segments(
vg, self._lvm_fuzzy_cmp_factor)
except errors.VGNotFoundError:
raise errors.PartitionSchemaMismatchError(
raise errors.PartitionSchemeMismatchError(
'There is no LVMvg {}'.format(vg_claim.idnr))
if actual_partition == desired_partition:
@ -762,7 +638,7 @@ class PartitionPolicyVerify(AbstractPartitionPolicy):
# TODO(dbogun): increase verbosity
def _report_mismatch(self, dev, desired, actual):
raise errors.PartitionSchemaMismatchError(
raise errors.PartitionSchemeMismatchError(
'Partition mismatch on {}'.format(dev))
def _make_filesystem(self, claim):

View File

@ -1,192 +0,0 @@
#
# Copyright 2016 Cray Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest2
from bareon.drivers.data import generic
from bareon.utils.partition import MiB
from bareon.utils.partition import TiB
class TestKsDisks(unittest2.TestCase):
    """Tests for GenericDataDriver._ks_disks.

    The property must keep only entries of type 'disk' with a strictly
    positive size, reading its input from _partition_data().
    """
    def setUp(self):
        super(TestKsDisks, self).setUp()
        self.driver = _DummyDataDriver({})
        # Replace the partition-data source so every test controls the input.
        self.driver._partition_data = self.mock_part_data = mock.MagicMock()
    def test_no_partition_data(self):
        # Empty input yields an empty result.
        self.mock_part_data.return_value = []
        desired = []
        result = self.driver._ks_disks
        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()
    def test_no_partitions_valid_size(self):
        # Non-positive sizes are filtered out even for 'disk' entries.
        self.mock_part_data.return_value = [
            {'size': -100, 'type': 'disk'},
            {'size': 0, 'type': 'disk'}
        ]
        desired = []
        result = self.driver._ks_disks
        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()
    def test_no_partitions_valid_type(self):
        # Entries of non-'disk' types are filtered out regardless of size.
        self.mock_part_data.return_value = [
            {'size': 100, 'type': 'vg'},
            {'size': 200, 'type': 'pv'}
        ]
        desired = []
        result = self.driver._ks_disks
        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()
    def test_valid_data(self):
        # Only the positive-size 'disk' entry survives.
        self.mock_part_data.return_value = [
            {'size': 100, 'type': 'vg'},
            {'size': 200, 'type': 'disk'}
        ]
        desired = [{'size': 200, 'type': 'disk'}]
        result = self.driver._ks_disks
        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()
class TestKsVgs(unittest2.TestCase):
    """Tests for GenericDataDriver._ks_vgs.

    The property must keep only entries of type 'vg' from _partition_data().
    """
    def setUp(self):
        super(TestKsVgs, self).setUp()
        self.driver = _DummyDataDriver({})
        # Replace the partition-data source so every test controls the input.
        self.driver._partition_data = self.mock_part_data = mock.MagicMock()
    def test_no_partition_data(self):
        # Empty input yields an empty result.
        self.mock_part_data.return_value = []
        desired = []
        result = self.driver._ks_vgs
        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()
    def test_no_partitions_valid_type(self):
        # Non-'vg' entries are filtered out.
        self.mock_part_data.return_value = [
            {'type': 'disk'},
            {'type': 'pv'}
        ]
        desired = []
        result = self.driver._ks_vgs
        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()
    def test_valid_data(self):
        # Only the 'vg' entry survives.
        self.mock_part_data.return_value = [
            {'type': 'vg'},
            {'type': 'disk'}
        ]
        desired = [{'type': 'vg'}]
        result = self.driver._ks_vgs
        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()
class TestSmallKsDisks(unittest2.TestCase):
    """Tests for GenericDataDriver._small_ks_disks.

    The property must keep only 'disk' entries below the driver's
    "small disk" size threshold (multi-TiB disks are excluded; exact
    threshold is defined by the driver, between 1 MiB and 3 TiB here).
    """
    def setUp(self):
        super(TestSmallKsDisks, self).setUp()
        self.driver = _DummyDataDriver({})
        # Replace the partition-data source so every test controls the input.
        self.driver._partition_data = self.mock_part_data = mock.MagicMock()
    def test_no_partition_data(self):
        # Empty input yields an empty result.
        self.mock_part_data.return_value = []
        desired = []
        result = self.driver._small_ks_disks
        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()
    def test_no_partitions_valid_size(self):
        # Multi-TiB disks are not "small" and must be filtered out.
        self.mock_part_data.return_value = [
            {'size': 3 * TiB, 'type': 'disk'},
            {'size': 5 * TiB, 'type': 'disk'}
        ]
        desired = []
        result = self.driver._small_ks_disks
        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()
    def test_valid_data(self):
        # Small 'disk' entries pass; non-'disk' types are still excluded.
        self.mock_part_data.return_value = [
            {'size': 3 * MiB, 'type': 'vg'},
            {'size': 1 * MiB, 'type': 'disk'}
        ]
        desired = [{'size': 1 * MiB, 'type': 'disk'}]
        result = self.driver._small_ks_disks
        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()
class TestGetLabel(unittest2.TestCase):
    """Tests for GenericDataDriver._getlabel.

    The helper renders a filesystem label into a mkfs '-L' argument string,
    returning '' for no label and truncating long labels to 12 characters.
    """
    def setUp(self):
        super(TestGetLabel, self).setUp()
        self.driver = _DummyDataDriver({})
    def test_no_label(self):
        # None means "no label": an empty argument string is produced.
        label = None
        desired = ''
        result = self.driver._getlabel(label)
        self.assertEqual(result, desired)
    def test_long_label(self):
        # Overlong labels are truncated to the first 12 characters.
        label = 'l' * 100
        desired = ' -L {0} '.format('l' * 12)
        result = self.driver._getlabel(label)
        self.assertEqual(result, desired)
    def test_valid_label(self):
        # Short labels are passed through unchanged.
        label = 'label'
        desired = ' -L {0} '.format(label)
        result = self.driver._getlabel(label)
        self.assertEqual(result, desired)
class _DummyDataDriver(generic.GenericDataDriver):
    """Minimal concrete GenericDataDriver used as a fixture in these tests."""
    def _partition_data(self):
        # Tests usually replace this with a mock; default is no partitions.
        return []
    @classmethod
    def validate_data(cls, payload):
        # Accept any payload; input validation is out of scope here.
        pass

View File

@ -20,9 +20,7 @@ import unittest2
from oslo_config import cfg
from bareon.actions import partitioning
from bareon.drivers.deploy import generic
from bareon import errors
from bareon.objects.partition.fs import FileSystem
CONF = cfg.CONF
@ -518,206 +516,3 @@ class TestGenerateFstab(unittest2.TestCase):
mock.call.write('UUID=1 / ext4 defaults 0 0\n'),
mock.call.write('UUID=2 /usr ext4 noatime 0 0\n')],
any_order=True)
@mock.patch("bareon.drivers.deploy.generic.PartitionSchemaCompareTool")
class TestPolicyPartitioner(unittest2.TestCase):
    """Tests for generic.PolicyPartitioner.partition.

    Each test wires a driver mock with a given partitions_policy and checks
    which of the partitioning sub-steps (filesystem cleaning, actual
    partitioning, schema comparison) are triggered.
    """
    def setup(self, policy, cmp_mock, keep_data_flag_present=False):
        # NOTE: compare tool is class-level patched; the tests call this
        # helper (not setUp) to build the fixture per policy under test.
        cmp_mock().configure_mock(_mock_unsafe=True)
        cmp_mock().assert_no_diff.return_value = True
        self.driver = mock.Mock(
            partitions_policy=policy,
            partition_scheme=mock.Mock(
                **{'to_dict.return_value': {"test": 1},
                   'skip_partitioning': keep_data_flag_present}),
            hw_partition_scheme=mock.Mock(
                **{'to_dict.return_value': {"test": 2}}),
        )
        self.pp = generic.PolicyPartitioner(self.driver)
        self.pp.partitioning = partitioning.PartitioningAction(self.driver)
        # Stub out the destructive sub-steps so calls can be asserted.
        self.pp.partitioning._do_clean_filesystems = mock.Mock()
        self.pp.partitioning._do_partitioning = mock.Mock()
        self.clean_fs_mock = self.pp.partitioning._do_clean_filesystems
        self.part_mock = self.pp.partitioning._do_partitioning
    def test_partition_verify(self, cmp_mock):
        # 'verify' policy: only compares user vs hw schema, touches nothing.
        self.setup('verify', cmp_mock)
        self.pp.partition()
        cmp_mock().assert_no_diff.assert_called_once_with(
            {'test': 1}, {'test': 2}
        )
        self.clean_fs_mock.assert_has_calls([])
        self.part_mock.assert_has_calls([])
    def test_partition_preserve(self, cmp_mock):
        # TODO: 'preserve' policy behaviour is not covered yet.
        pass
    def test_partition_nailgun_legacy_skip(self, cmp_mock):
        # keep_data flag set: only filesystems are cleaned.
        self.setup('nailgun_legacy', cmp_mock,
                   keep_data_flag_present=True)
        self.pp.partition()
        self.clean_fs_mock.assert_called_once_with()
        self.part_mock.assert_has_calls([])
    def test_partition_nailgun_legacy_partition(self, cmp_mock):
        # No keep_data flag: full partitioning is performed.
        self.setup('nailgun_legacy', cmp_mock,
                   keep_data_flag_present=False)
        self.pp.partition()
        self.clean_fs_mock.assert_has_calls([])
        self.part_mock.assert_called_once_with()
    def test_partition_clean(self, cmp_mock):
        # 'clean' policy: verifies disk sizes, then repartitions from scratch.
        self.setup('clean', cmp_mock)
        verify_mock = self.pp._verify_disk_size = mock.Mock()
        self.pp.partition()
        cmp_mock().assert_no_diff.assert_has_calls([])
        self.clean_fs_mock.assert_has_calls([])
        self.part_mock.assert_called_once_with()
        verify_mock.assert_called_once_with(
            self.driver.partition_scheme.parteds,
            self.driver.hu_disks)
    def test_unknown_policy(self, cmp_mock):
        # An unrecognized policy name must raise WrongPartitionPolicyError.
        self.setup('non-existent', cmp_mock)
        self.assertRaises(errors.WrongPartitionPolicyError,
                          self.pp.partition)
class TestPartitionSchemaCompareTool(unittest2.TestCase):
    """Tests for generic.PartitionSchemaCompareTool.assert_no_diff.

    The tool normalizes a user-supplied schema and a hardware-read schema
    (keep_data flags reset, extra hw disks ignored, fs types translated)
    and raises PartitionSchemeMismatchError when they diverge.
    """
    def setUp(self):
        super(TestPartitionSchemaCompareTool, self).setUp()
        self.comp = generic.PartitionSchemaCompareTool()
        # Points to pay attention:
        # Some keep data flags are set, which are translated to False.
        self.user_schema = {
            'pvs': [], 'lvs': [],
            'fss': [
                {'keep_data': True, 'mount': u'/', 'fs_label': '',
                 'fs_type': u'ext4', 'fs_options': '', 'device': '/dev/vda2'},
                {'keep_data': True, 'mount': u'/usr', 'fs_label': '',
                 'fs_type': u'ext4', 'fs_options': '', 'device': '/dev/vda3'},
                {'keep_data': True, 'mount': u'swap', 'fs_label': '',
                 'fs_type': u'swap', 'fs_options': '', 'device': '/dev/vda4'},
            ],
            'parteds': [
                {'install_bootloader': True,
                 'partitions': [
                     {'count': 1, 'begin': 1, 'end': 25,
                      'name': '/dev/vda1', 'keep_data': False,
                      'device': '/dev/vda', 'flags': ['bios_grub'],
                      'guid': None, 'configdrive': False,
                      'partition_type': 'primary'},
                     {'count': 2, 'begin': 25, 'end': 4025,
                      'name': '/dev/vda2', 'keep_data': False,
                      'device': '/dev/vda', 'flags': [], 'guid': None,
                      'configdrive': False, 'partition_type': 'primary'},
                     {'count': 3, 'begin': 4025, 'end': 7025,
                      'name': '/dev/vda3', 'keep_data': True,
                      'device': '/dev/vda', 'flags': [], 'guid': None,
                      'configdrive': False,
                      'partition_type': 'primary'},
                     {'count': 3, 'begin': 7025, 'end': 8025,
                      'name': '/dev/vda4', 'keep_data': False,
                      'device': '/dev/vda', 'flags': [], 'guid': None,
                      'configdrive': False,
                      'partition_type': 'primary'}
                 ],
                 'name': '/dev/vda', 'label': 'gpt'}], 'mds': [], 'vgs': []}
        # Has extra disk - vdb, which is ignored.
        # Also note 'linux-swap(v1)' fs type, which must be translated to
        # the user-schema name 'swap' during comparison.
        self.hw_schema = {
            'pvs': [], 'lvs': [],
            'fss': [
                {'keep_data': False, 'mount': '/', 'fs_label': '',
                 'fs_type': 'ext4', 'fs_options': '', 'device': '/dev/vda2'},
                {'keep_data': False, 'mount': '/usr', 'fs_label': '',
                 'fs_type': 'ext4', 'fs_options': '', 'device': '/dev/vda3'},
                {'keep_data': False, 'mount': 'swap', 'fs_label': '',
                 'fs_type': 'linux-swap(v1)', 'fs_options': '',
                 'device': '/dev/vda4'}
            ],
            'parteds': [
                {'install_bootloader': True,
                 'partitions': [
                     {'count': 1, 'begin': 1, 'end': 25,
                      'name': '/dev/vda1', 'keep_data': False,
                      'device': '/dev/vda', 'flags': ['bios_grub'],
                      'guid': None, 'configdrive': False,
                      'partition_type': 'primary'},
                     {'count': 2, 'begin': 25, 'end': 4025,
                      'name': '/dev/vda2', 'keep_data': False,
                      'device': '/dev/vda', 'flags': [], 'guid': None,
                      'configdrive': False, 'partition_type': 'primary'},
                     {'count': 3, 'begin': 4025, 'end': 7025,
                      'name': '/dev/vda3', 'keep_data': False,
                      'device': '/dev/vda', 'flags': [], 'guid': None,
                      'configdrive': False, 'partition_type': 'primary'},
                     {'count': 3, 'begin': 7025, 'end': 8025,
                      'name': '/dev/vda4', 'keep_data': False,
                      'device': '/dev/vda', 'flags': [], 'guid': None,
                      'configdrive': False,
                      'partition_type': 'primary'}
                 ],
                 'name': '/dev/vda', 'label': 'gpt'},
                {'install_bootloader': False,
                 'partitions': [
                     {'count': 1, 'begin': 0, 'end': 101,
                      'name': '/dev/vdb1', 'keep_data': False,
                      'device': '/dev/vdb', 'flags': [],
                      'guid': None, 'configdrive': False,
                      'partition_type': None}],
                 'name': '/dev/vdb', 'label': 'loop'}],
            'mds': [], 'vgs': []}
    def test_match(self):
        # Schemas are equivalent after normalization: no exception raised.
        self.comp.assert_no_diff(self.user_schema, self.hw_schema)
    def test_mismatch_extra_part_in_user_schema(self):
        # A partition requested by the user but absent on hw is a mismatch.
        self.user_schema['parteds'][0]['partitions'].append({
            'count': 3, 'begin': 4025, 'end': 7025,
            'name': '/dev/vda4', 'keep_data': False,
            'device': '/dev/vda', 'flags': [], 'guid': None,
            'configdrive': False, 'partition_type': 'primary'
        })
        self.assertRaises(errors.PartitionSchemeMismatchError,
                          self.comp.assert_no_diff,
                          self.user_schema, self.hw_schema)
    def test_mismatch_extra_disk_in_user_schema(self):
        # A disk requested by the user but absent on hw is a mismatch
        # (hw disks absent from the user schema are ignored, not vice versa).
        self.user_schema['parteds'].append({
            'install_bootloader': True,
            'partitions': [
                {'count': 1, 'begin': 0, 'end': 101,
                 'name': '/dev/vdc1', 'keep_data': True,
                 'device': '/dev/vdc', 'flags': [],
                 'guid': None, 'configdrive': False,
                 'partition_type': None}],
            'name': '/dev/vdc', 'label': 'loop'
        })
        self.assertRaises(errors.PartitionSchemeMismatchError,
                          self.comp.assert_no_diff,
                          self.user_schema, self.hw_schema)
    def test_mismatch_extra_part_on_hw_schema(self):
        # An extra hw partition on a disk the user does manage is a mismatch.
        self.hw_schema['parteds'][0]['partitions'].append({
            'count': 3, 'begin': 4025, 'end': 7025,
            'name': '/dev/vda4', 'keep_data': False,
            'device': '/dev/vda', 'flags': [], 'guid': None,
            'configdrive': False, 'partition_type': 'primary'
        })
        self.assertRaises(errors.PartitionSchemeMismatchError,
                          self.comp.assert_no_diff,
                          self.user_schema, self.hw_schema)

View File

@ -227,183 +227,21 @@ LIST_BLOCK_DEVICES_SAMPLE = [
class _IronicTest(unittest2.TestCase):
_dummy_deployment_config = {
'partitions': []
}
def setUp(self):
super(_IronicTest, self).setUp()
with mock.patch.object(ironic.Ironic, 'validate_data'):
self.data_driver = ironic.Ironic({'partitions': []})
class TestIronicMatch(_IronicTest):
    """Tests for Ironic._match_device.

    _match_device decides whether a hardware-reported disk (hu_disk)
    corresponds to a deploy-config disk (ks_disk) by the configured id
    type: scsi address, by-path link, or device name.
    """
    def test_match_device_by_scsi_matches(self):
        # matches by scsi address
        fake_ks_disk = {
            'id': {
                'type': 'scsi',
                'value': '0:0:0:1'
            }
        }
        fake_hu_disk = {
            'scsi': '0:0:0:1'
        }
        self.assertTrue(
            self.data_driver._match_device(fake_hu_disk, fake_ks_disk))
    def test_match_device_by_scsi_not_matches(self):
        # different scsi addresses: no match
        fake_ks_disk = {
            'id': {
                'type': 'scsi',
                'value': '0:0:0:1'
            }
        }
        fake_hu_disk = {
            'scsi': '5:0:0:1'
        }
        self.assertFalse(
            self.data_driver._match_device(fake_hu_disk, fake_ks_disk))
    def test_match_device_by_path_matches(self):
        # hu 'path' is a list; a single matching by-path entry suffices
        fake_ks_disk = {
            'id': {
                'type': 'path',
                'value': 'disk/by-path/pci-0000:00:07.0-virtio-pci-virtio3'
            }
        }
        fake_hu_disk = {
            'path': [
                "/dev/disk/by-path/pci-0000:00:07.0-virtio-pci-virtio3",
                "/dev/disk/by-path/fake_path",
                "/dev/sdd"
            ]
        }
        self.assertTrue(
            self.data_driver._match_device(fake_hu_disk, fake_ks_disk))
    def test_match_device_by_path_not_matches(self):
        # none of the hu paths carries the requested by-path id: no match
        fake_ks_disk = {
            'id': {
                'type': 'path',
                'value': 'disk/by-path/pci-0000:00:07.0-virtio-pci-virtio3'
            }
        }
        fake_hu_disk = {
            'path': [
                "/dev/disk/by-path/fake_path",
                "/dev/sdd"
            ]
        }
        self.assertFalse(
            self.data_driver._match_device(fake_hu_disk, fake_ks_disk))
    def test_match_device_by_name_matches(self):
        # ks name 'sda' matches hu '/dev/sda'
        fake_ks_disk = {
            'id': {
                'type': 'name',
                'value': 'sda'
            }
        }
        fake_hu_disk = {
            'name': '/dev/sda'
        }
        self.assertTrue(
            self.data_driver._match_device(fake_hu_disk, fake_ks_disk))
    def test_match_device_by_name_not_matches(self):
        # different device names: no match
        fake_ks_disk = {
            'id': {
                'type': 'name',
                'value': 'sda'
            }
        }
        fake_hu_disk = {
            'name': '/dev/sdd'
        }
        self.assertFalse(
            self.data_driver._match_device(fake_hu_disk, fake_ks_disk))
@mock.patch('bareon.utils.hardware.scsi_address', mock.Mock())
class TestNailgunMockedMeta(_IronicTest):
    """Integration-style check of Ironic.partition_scheme building."""
    def test_partition_scheme(self):
        # Build the scheme from a full sample payload with mocked block
        # devices and verify the object counts of every scheme collection.
        with mock.patch.object(ironic.Ironic, 'validate_data'):
            data_driver = ironic.Ironic(PROVISION_SAMPLE_DATA_SWIFT)
        data_driver.get_image_ids = mock.MagicMock
        mock_devices = data_driver._get_block_devices = mock.MagicMock()
        mock_devices.return_value = LIST_BLOCK_DEVICES_SAMPLE
        p_scheme = data_driver.partition_scheme
        self.assertEqual(5, len(p_scheme.fss))
        self.assertEqual(5, len(p_scheme.pvs))
        self.assertEqual(3, len(p_scheme.lvs))
        self.assertEqual(2, len(p_scheme.vgs))
        self.assertEqual(3, len(p_scheme.parteds))
@mock.patch('bareon.utils.hardware.get_block_devices_from_udev_db')
class TestGetBlockDevices(_IronicTest):
    """Tests for Ironic._get_block_devices.

    The method lists devices from the udev DB and maps each through
    _get_block_device_info.
    """
    def setUp(self):
        super(TestGetBlockDevices, self).setUp()
        # Stub the per-device info collector; tests assert how it is called.
        self.mock_devices = mock.Mock()
        self.data_driver._get_block_device_info = self.mock_devices
    def test_no_devices(self, mock_get_block_devices_from_udev_db):
        # No udev devices: empty result, info collector never invoked.
        mock_get_block_devices_from_udev_db.return_value = []
        result = self.data_driver._get_block_devices()
        self.assertEqual(result, [])
        mock_get_block_devices_from_udev_db.assert_called_once_with()
        self.assertEqual(self.mock_devices.call_count, 0)
    def test_device_info(self, mock_get_block_devices_from_udev_db):
        # Each udev entry is passed through _get_block_device_info once.
        data = {'test': 'fake'}
        mock_get_block_devices_from_udev_db.return_value = [data]
        self.mock_devices.return_value = block_device = 'test_value'
        result = self.data_driver._get_block_devices()
        self.assertEqual(result, [block_device])
        mock_get_block_devices_from_udev_db.assert_called_once_with()
        self.mock_devices.assert_called_once_with(data)
@mock.patch('bareon.utils.hardware.get_device_ids')
@mock.patch('bareon.utils.hardware.get_device_info')
@mock.patch('bareon.utils.hardware.scsi_address')
class TestGetBlockDevice(_IronicTest):
    """Tests for Ironic._get_block_device_info.

    The method aggregates udev info, scsi address and device ids into a
    single dict, omitting keys for which nothing was discovered.
    """
    def test_no_device_info(self, mock_scsi_address, mock_get_device_info,
                            mock_get_device_ids):
        # Nothing discovered: only the device name is reported.
        device = 'fake_device'
        mock_scsi_address.return_value = None
        mock_get_device_info.return_value = {}
        mock_get_device_ids.return_value = []
        result = self.data_driver._get_block_device_info(device)
        self.assertEqual(result, {'name': 'fake_device'})
    def test_device_info(self, mock_scsi_address, mock_get_device_info,
                         mock_get_device_ids):
        # All sources answer: uspec, path ids and scsi address are merged.
        device = 'fake_device'
        devpath = ['test/devpath']
        uspec = {'DEVPATH': devpath}
        mock_get_device_info.return_value = {
            'uspec': uspec
        }
        mock_scsi_address.return_value = scsi_address = '1:0:0:0'
        mock_get_device_ids.return_value = devpath
        desired = {'path': devpath, 'name': device, 'scsi': scsi_address,
                   'uspec': uspec}
        result = self.data_driver._get_block_device_info(device)
        self.assertEqual(result, desired)
        mock_get_device_info.assert_called_once_with(device)
        mock_scsi_address.assert_called_once_with(device)
with mock.patch.object(ironic.Ironic, 'validate_data'),\
mock.patch('bareon.objects.ironic.block_device.'
'StorageSubsystem'):
self.data_driver = ironic.Ironic(self._dummy_deployment_config)
@mock.patch('bareon.drivers.data.ironic.Ironic.validate_data', mock.Mock())
@mock.patch(
'bareon.drivers.data.ironic.Ironic._get_image_scheme', mock.Mock())
class TestGetGrub(unittest2.TestCase):
@mock.patch('bareon.utils.utils.parse_kernel_cmdline')
def test_kernel_params(self, cmdline_mock):
@ -415,32 +253,41 @@ class TestGetGrub(unittest2.TestCase):
"extrastuff": "test123"
}
data_driver = ironic.Ironic(data)
with mock.patch('bareon.drivers.data.ironic.StorageParser'):
data_driver = ironic.Ironic(data)
self.assertEqual('test_param=test_val BOOTIF=01-52-54-00-a5-55-58',
data_driver.grub.kernel_params)
def test_no_kernel_params(self):
@mock.patch('bareon.utils.utils.parse_kernel_cmdline')
def test_no_kernel_params(self, cmdline_mock):
data = {'deploy_data': {'other_data': "test"},
'partitions': {}}
data_driver = ironic.Ironic(data)
cmdline_mock.return_value = {}
with mock.patch('bareon.drivers.data.ironic.StorageParser'):
data_driver = ironic.Ironic(data)
self.assertEqual('', data_driver.grub.kernel_params)
@mock.patch('bareon.drivers.data.ironic.Ironic.validate_data', mock.Mock())
@mock.patch(
'bareon.drivers.data.ironic.Ironic._get_image_scheme', mock.Mock())
class TestPartitionsPolicy(unittest2.TestCase):
def test_partitions_policy(self):
data = {'partitions_policy': "test_value",
'partitions': {}}
data_driver = ironic.Ironic(data)
with mock.patch('bareon.drivers.data.ironic.StorageParser'):
data_driver = ironic.Ironic(data)
self.assertEqual('test_value', data_driver.partitions_policy)
def test_partitions_policy_default(self):
data = {'partitions': {}}
data_driver = ironic.Ironic(data)
with mock.patch('bareon.drivers.data.ironic.StorageParser'):
data_driver = ironic.Ironic(data)
self.assertEqual('verify', data_driver.partitions_policy)

View File

@ -13,15 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import mock
import unittest2
from bareon.drivers.data import ironic
from bareon import errors
from bareon.utils import hardware as hu
from bareon.utils import utils
MiB = 2 ** 20
@ -43,900 +38,56 @@ class IronicTestAbstract(unittest2.TestCase):
return driver
class TestGetImageSchema(IronicTestAbstract):
def test_get_image_schema(self):
class Abstract(unittest2.TestCase):
dummy_data = {'partitions': []}
def setUp(self):
super(Abstract, self).setUp()
self.data_driver = self._new_data_driver(self.dummy_data)
@staticmethod
@mock.patch(
'bareon.drivers.data.ironic.Ironic._get_image_scheme', mock.Mock())
@mock.patch(
'bareon.drivers.data.ironic.Ironic.validate_data', mock.Mock())
@mock.patch(
'bareon.drivers.data.ironic.Ironic._collect_fs_bindings',
mock.Mock(return_value={}))
@mock.patch(
'bareon.drivers.data.ironic.Ironic._collect_fs_claims',
mock.Mock(return_value={}))
@mock.patch(
'bareon.drivers.data.ironic.Ironic._handle_loader', mock.Mock())
@mock.patch(
'bareon.drivers.data.ironic.StorageParser', mock.Mock())
@mock.patch(
'bareon.drivers.data.ironic.DeprecatedPartitionSchemeBuilder',
mock.Mock())
def _new_data_driver(data):
return ironic.Ironic(data)
class TestGetImageSchema(Abstract):
def test_get_image_scheme(self):
image_uri = 'test_uri'
rsync_flags = '-a -X'
deploy_flags = {'rsync_flags': rsync_flags}
data = {'images': [
{
'image_pull_url': image_uri,
'target': '/',
'name': 'test'
}
], 'image_deploy_flags': deploy_flags}
driver = self.new_data_driver(data)
data = {
'images': [
{
'image_pull_url': image_uri,
'target': '/',
'name': 'test'
}
],
'partitions': [],
'image_deploy_flags': deploy_flags}
result = driver._get_image_scheme()
data_driver = self._new_data_driver(data)
result = data_driver._get_image_scheme()
self.assertEqual(len(result.images), 1)
result_image = result.images[0]
self.assertEqual(result_image.deployment_flags, deploy_flags)
self.assertEqual(result_image.uri, image_uri)
class TestMatchDevice(IronicTestAbstract):
    """Tests for Ironic._match_device with list and scalar hu values.

    A ks id matches when its value equals the hu value or is contained
    in the hu value list for the same id type.
    """
    def test_match_list_value(self):
        # Value present in the hu list: match.
        test_type = 'path'
        test_value = 'test_path'
        ks_disk = {'id': {'type': test_type, 'value': test_value}}
        hu_disk = {test_type: ['path1', test_value]}
        result = self.driver._match_device(hu_disk, ks_disk)
        self.assertTrue(result)
    def test_not_match_list_value(self):
        # Value absent from the hu list: no match.
        test_type = 'path'
        test_value = 'test_path'
        ks_disk = {'id': {'type': test_type, 'value': test_value}}
        hu_disk = {test_type: ['path1', 'path2']}
        result = self.driver._match_device(hu_disk, ks_disk)
        self.assertFalse(result)
    def test_match_one_value(self):
        # Scalar hu value equal to the ks value: match.
        test_type = 'path'
        test_value = 'test_path'
        ks_disk = {'id': {'type': test_type, 'value': test_value}}
        hu_disk = {test_type: test_value}
        result = self.driver._match_device(hu_disk, ks_disk)
        self.assertTrue(result)
    def test_not_match_one_value(self):
        # Scalar hu value different from the ks value: no match.
        test_type = 'path'
        test_value = 'test_path'
        ks_disk = {'id': {'type': test_type, 'value': test_value}}
        hu_disk = {test_type: 'path1'}
        result = self.driver._match_device(hu_disk, ks_disk)
        self.assertFalse(result)
class TestDiskDev(IronicTestAbstract):
    """Tests for Ironic._disk_dev.

    The method must resolve a ks disk to exactly one hu disk; zero or
    multiple candidates raise DiskNotFoundError.
    """
    def setUp(self):
        super(TestDiskDev, self).setUp()
        # Stub the matcher so side_effect lists drive candidate selection.
        self.driver._match_device = self.mock_match_device = mock.MagicMock()
    def test_no_valid_disks(self):
        # No hu disk matches: DiskNotFoundError.
        self.mock_match_device.side_effect = [False, False, False]
        self.driver._hu_disks = [{'name': 'disk1'},
                                 {'name': 'disk2'},
                                 {'name': 'disk3'}]
        ks_disk = {'id': {'type': 'name', 'value': 'not_found'}}
        self.assertRaises(errors.DiskNotFoundError, self.driver._disk_dev,
                          ks_disk)
    def test_more_than_one_valid_disk(self):
        # Ambiguous match (two hu disks): DiskNotFoundError as well.
        self.mock_match_device.side_effect = [True, False, True]
        self.driver._hu_disks = [{'name': 'disk1', 'device': 'disk1'},
                                 {'name': 'disk2'},
                                 {'name': 'disk3', 'device': 'disk3'}]
        ks_disk = {'id': {'type': 'name', 'value': 'ks_disk'}}
        self.assertRaises(errors.DiskNotFoundError, self.driver._disk_dev,
                          ks_disk)
    def test_one_valid_disk(self):
        # Exactly one match: its hu 'name' is returned.
        ks_disk = 'ks_disk'
        self.mock_match_device.side_effect = [True, False, False]
        self.driver._hu_disks = [{'name': 'disk1', 'device': ks_disk},
                                 {'name': 'disk2'},
                                 {'name': 'disk3'}]
        result = self.driver._disk_dev(None)
        self.assertEqual(result, 'disk1')
class TestMatchPartition(IronicTestAbstract):
    """Tests for Ironic._match_data_by_pattern.

    Like _match_device, but ks id values may be fnmatch-style wildcard
    patterns; hu values may be scalars or lists.
    """
    def test_match_list_value(self):
        # Exact value present in the hu list: match.
        test_type = 'path'
        test_value = 'test_path'
        ks_partition = {'id': {'type': test_type, 'value': test_value}}
        hu_partition = {test_type: ['path1', test_value]}
        result = self.driver._match_data_by_pattern(hu_partition, ks_partition)
        self.assertTrue(result)
    def test_match_list_value_wildcard(self):
        # Wildcard pattern matches one entry of the hu list.
        test_type = 'path'
        test_value_wc = 'test_*'
        test_value = 'test_path'
        ks_partition = {'id': {'type': test_type, 'value': test_value_wc}}
        hu_partition = {test_type: ['path1', test_value]}
        result = self.driver._match_data_by_pattern(hu_partition, ks_partition)
        self.assertTrue(result)
    def test_not_match_list_value(self):
        # Exact value absent from the hu list: no match.
        test_type = 'path'
        test_value = 'test_path'
        ks_partition = {'id': {'type': test_type, 'value': test_value}}
        hu_partition = {test_type: ['path1', 'path2']}
        result = self.driver._match_data_by_pattern(hu_partition, ks_partition)
        self.assertFalse(result)
    def test_not_match_list_value_wildcard(self):
        # Wildcard matches no entry of the hu list: no match.
        test_type = 'path'
        test_value_wc = 'test_*'
        ks_partition = {'id': {'type': test_type, 'value': test_value_wc}}
        hu_partition = {test_type: ['path1', 'path2']}
        result = self.driver._match_data_by_pattern(hu_partition, ks_partition)
        self.assertFalse(result)
    def test_match_one_value(self):
        # Exact value equals scalar hu value: match.
        test_type = 'path'
        test_value = 'test_path'
        ks_partition = {'id': {'type': test_type, 'value': test_value}}
        hu_partition = {test_type: test_value}
        result = self.driver._match_data_by_pattern(hu_partition, ks_partition)
        self.assertTrue(result)
    def test_match_one_value_wildcard(self):
        # Wildcard matches scalar hu value: match.
        test_type = 'path'
        test_value_wc = 'test_*'
        test_value = 'test_path'
        ks_partition = {'id': {'type': test_type, 'value': test_value_wc}}
        hu_partition = {test_type: test_value}
        result = self.driver._match_data_by_pattern(hu_partition, ks_partition)
        self.assertTrue(result)
    def test_not_match_one_value(self):
        # Exact value differs from scalar hu value: no match.
        test_type = 'path'
        test_value = 'test_path'
        ks_partition = {'id': {'type': test_type, 'value': test_value}}
        hu_partition = {test_type: 'path1'}
        result = self.driver._match_data_by_pattern(hu_partition, ks_partition)
        self.assertFalse(result)
    def test_not_match_one_wildcard(self):
        # Wildcard does not match scalar hu value: no match.
        test_type = 'path'
        test_value_wc = 'test_*'
        ks_partition = {'id': {'type': test_type, 'value': test_value_wc}}
        hu_partition = {test_type: 'path1'}
        result = self.driver._match_data_by_pattern(hu_partition, ks_partition)
        self.assertFalse(result)
class TestDiskPartition(IronicTestAbstract):
    """Tests for Ironic._disk_partition.

    Mirrors TestDiskDev: exactly one matching hu partition is required,
    otherwise DiskNotFoundError is raised.
    """
    def setUp(self):
        super(TestDiskPartition, self).setUp()
        # Stub the pattern matcher; side_effect lists drive selection.
        self.driver._match_data_by_pattern = \
            self.mock_match_part = mock.MagicMock()
    def test_no_valid_disks(self):
        # No hu partition matches: DiskNotFoundError.
        self.mock_match_part.side_effect = [False, False, False]
        self.driver._hu_partitions = [{'name': 'disk1'},
                                      {'name': 'disk2'},
                                      {'name': 'disk3'}]
        ks_disk = {'id': {'type': 'name', 'value': 'ks_disk'}}
        self.assertRaises(errors.DiskNotFoundError,
                          self.driver._disk_partition, ks_disk)
    def test_more_than_one_valid_disk(self):
        # Ambiguous match: DiskNotFoundError as well.
        self.mock_match_part.side_effect = [True, False, True]
        self.driver._hu_partitions = [{'name': 'disk1', 'device': 'disk1'},
                                      {'name': 'disk2'},
                                      {'name': 'disk3', 'device': 'disk3'}]
        ks_disk = {'id': {'type': 'name', 'value': 'ks_disk'}}
        self.assertRaises(errors.DiskNotFoundError,
                          self.driver._disk_partition, ks_disk)
    def test_one_valid_disk(self):
        # Exactly one match: its hu 'name' is returned.
        desired = ks_disk = 'ks_disk'
        self.mock_match_part.side_effect = [True, False, False]
        self.driver._hu_partitions = [{'name': ks_disk},
                                      {'name': 'disk2'},
                                      {'name': 'disk3'}]
        result = self.driver._disk_partition(None)
        self.assertEqual(result, desired)
@mock.patch('bareon.utils.hardware.get_partitions_from_udev_db')
@mock.patch('bareon.utils.hardware.get_device_ids')
class TestGetPartitionIds(IronicTestAbstract):
    """Tests for Ironic._get_device_ids with dev_type=PARTITION.

    The method lists partitions from the udev DB and collects non-empty
    id dicts from hardware.get_device_ids.
    """
    def test_no_devices(self, mock_ids, mock_partitions):
        # No partitions in udev DB: ids are never requested.
        mock_partitions.return_value = []
        desired = []
        result = self.driver._get_device_ids(dev_type=hu.PARTITION)
        self.assertEqual(result, desired)
        self.assertFalse(mock_ids.called)
    def test_no_ids_on_devices(self, mock_ids, mock_partitions):
        # Partitions exist but expose no ids: empty result.
        mock_partitions.return_value = parts = ['/dev/sda1', '/dev/sda2']
        mock_ids.return_value = []
        desired = []
        result = self.driver._get_device_ids(dev_type=hu.PARTITION)
        self.assertEqual(result, desired)
        mock_ids.assert_has_calls([mock.call(part) for part in parts])
    def test_success(self, mock_ids, mock_partitions):
        # One id dict collected per partition, in udev DB order.
        mock_partitions.return_value = parts = ['/dev/sda1', '/dev/sda2']
        mock_ids.side_effect = desired = [
            {'name': '/dev/sda1'},
            {'name': '/dev/sda2'}
        ]
        result = self.driver._get_device_ids(dev_type=hu.PARTITION)
        self.assertEqual(result, desired)
        mock_ids.assert_has_calls([mock.call(part) for part in parts])
class TestFindHwFstab(IronicTestAbstract):
    """Tests for Ironic._find_hw_fstab.

    The method reads /etc/fstab from the root filesystem of every deployed
    OS and concatenates the contents.  Each side_effect triple below feeds
    utils.execute for one OS; presumably mount / read fstab / umount —
    TODO(review): confirm against the driver implementation.
    """
    @mock.patch.object(utils, 'execute')
    def test__find_hw_fstab_success_single_disk(self, exec_mock):
        # One OS (os_id '1'): only its root fs is consulted.
        fs = namedtuple('fs', 'mount type device os_id')
        fss = [fs(mount='/', type='ext4', device='/dev/sda', os_id='1'),
               fs(mount='/usr', type='ext4', device='/dev/sdb', os_id='1')]
        self.driver._partition_scheme = ironic.objects.PartitionScheme()
        self.driver.partition_scheme.fss = fss
        exec_mock.side_effect = [('stdout', 'stderr'),
                                 ('fstab_1', 'stderr'),
                                 ('stdout', 'stderr')]
        res = self.driver._find_hw_fstab()
        self.assertEqual('\n'.join(('fstab_1',)), res)
    @mock.patch.object(utils, 'execute')
    def test__find_hw_fstab_success_two_disk(self, exec_mock):
        # Two OSes: one fstab per root fs, joined with a newline.
        fs = namedtuple('fs', 'mount type device os_id')
        fss = [fs(mount='/', type='ext4', device='/dev/sda', os_id='1'),
               fs(mount='/usr', type='ext4', device='/dev/sdb', os_id='1'),
               fs(mount='/', type='ext4', device='/dev/sda', os_id='2')]
        self.driver._partition_scheme = ironic.objects.PartitionScheme()
        self.driver.partition_scheme.fss = fss
        exec_mock.side_effect = [('stdout', 'stderr'),
                                 ('fstab_1', 'stderr'),
                                 ('stdout', 'stderr'),
                                 ('stdout', 'stderr'),
                                 ('fstab_2', 'stderr'),
                                 ('stdout', 'stderr')]
        res = self.driver._find_hw_fstab()
        self.assertEqual('\n'.join(('fstab_1', 'fstab_2')), res)
    @mock.patch.object(utils, 'execute')
    def test__find_hw_fstab_fail_error_while_reading_fstba(self, exec_mock):
        # A failing command while reading fstab surfaces as
        # HardwarePartitionSchemeCannotBeReadError.
        fs = namedtuple('fs', 'mount type device os_id')
        fss = [fs(mount='/etc', type='ext4', device='/dev/sda', os_id='1'),
               fs(mount='/', type='ext4', device='/dev/sda', os_id='1')]
        self.driver._partition_scheme = ironic.objects.PartitionScheme()
        self.driver.partition_scheme.fss = fss
        exec_mock.side_effect = [('stdout', 'stderr'),
                                 errors.ProcessExecutionError,
                                 ('stdout', 'stderr')]
        self.assertRaises(errors.HardwarePartitionSchemeCannotBeReadError,
                          self.driver._find_hw_fstab)
class TestConvertStringSize(unittest2.TestCase):
    """Tests for ironic.convert_string_sizes.

    The helper walks the deploy config and converts every human-readable
    size string ('10000 MB', '5 GB', '64', ...) through utils.human2bytes,
    leaving percentage values and the literal 'remaining' untouched.
    """
    @mock.patch.object(ironic.utils, 'human2bytes')
    def test_success_single_disk(self, mock_converter):
        # Disk size and both partition sizes go through the converter.
        data = {'image_deploy_flags': {'rsync_flags': '-a -A -X'},
                'partitions': [{'extra': [],
                                'id': {'type': 'name', 'value': 'vda'},
                                'size': '10000 MB',
                                'type': 'disk',
                                'volumes': [{'file_system': 'ext4',
                                             'mount': '/',
                                             'size': '5 GB',
                                             'type': 'partition'},
                                            {'file_system': 'ext4',
                                             'mount': '/var',
                                             'size': '4000',
                                             'type': 'partition'}]}]}
        ironic.convert_string_sizes(data)
        mock_converter.assert_has_calls(
            [mock.call('10000 MB'), mock.call('5 GB'), mock.call('4000')],
            any_order=True)
    @mock.patch.object(ironic.utils, 'human2bytes')
    def test_success_two_disks(self, mock_converter):
        # Sizes on every disk in the list are converted.
        data = {'image_deploy_flags': {'rsync_flags': '-a -A -X'},
                'partitions': [{'extra': [],
                                'id': {'type': 'name', 'value': 'vda'},
                                'size': '10000 MB',
                                'type': 'disk',
                                'volumes': [{'file_system': 'ext4',
                                             'mount': '/',
                                             'size': '5 GB',
                                             'type': 'partition'},
                                            {'file_system': 'ext4',
                                             'mount': '/var',
                                             'size': '4000',
                                             'type': 'partition'}]},
                               {'extra': [],
                                'id': {'type': 'name', 'value': 'vdb'},
                                'size': '2000 MB',
                                'type': 'disk',
                                'volumes': [{'file_system': 'ext4',
                                             'mount': '/usr',
                                             'size': '2 GB',
                                             'type': 'partition'}]}]}
        ironic.convert_string_sizes(data)
        mock_converter.assert_has_calls(
            [mock.call('10000 MB'), mock.call('5 GB'), mock.call('4000'),
             mock.call('2000 MB'), mock.call('2 GB')], any_order=True)
    @mock.patch.object(ironic.utils, 'human2bytes')
    def test_success_lvm_meta_size(self, mock_converter):
        # The pv-specific 'lvm_meta_size' field is converted too.
        data = {'image_deploy_flags': {'rsync_flags': '-a -A -X'},
                'partitions': [{'extra': [],
                                'id': {'type': 'name', 'value': 'vda'},
                                'size': '10000 MB',
                                'type': 'disk',
                                'volumes': [{'file_system': 'ext4',
                                             'mount': '/',
                                             'size': '5 GB',
                                             'type': 'partition'},
                                            {"size": "4 GB",
                                             "type": "pv",
                                             "lvm_meta_size": "64",
                                             "vg": "os"
                                             }]}]}
        ironic.convert_string_sizes(data)
        mock_converter.assert_has_calls(
            [mock.call('10000 MB'), mock.call('5 GB'), mock.call('4 GB'),
             mock.call('64')], any_order=True)
    @mock.patch.object(ironic.utils, 'human2bytes')
    def test_success_ignore_percent(self, mock_converter):
        # Percentage sizes ('50%') are left as-is, not converted.
        data = {'image_deploy_flags': {'rsync_flags': '-a -A -X'},
                'partitions': [{'extra': [],
                                'id': {'type': 'name', 'value': 'vda'},
                                'size': '10000 MB',
                                'type': 'disk',
                                'volumes': [{'file_system': 'ext4',
                                             'mount': '/',
                                             'size': '50%',
                                             'type': 'partition'},
                                            {'file_system': 'ext4',
                                             'mount': '/var',
                                             'size': '4000',
                                             'type': 'partition'}]}]}
        ironic.convert_string_sizes(data)
        mock_converter.assert_has_calls(
            [mock.call('10000 MB'), mock.call('4000')],
            any_order=True)
    @mock.patch.object(ironic.utils, 'human2bytes')
    def test_success_ignore_remaining(self, mock_converter):
        # The literal 'remaining' is left as-is, not converted.
        data = {'image_deploy_flags': {'rsync_flags': '-a -A -X'},
                'partitions': [{'extra': [],
                                'id': {'type': 'name', 'value': 'vda'},
                                'size': '10000 MB',
                                'type': 'disk',
                                'volumes': [{'file_system': 'ext4',
                                             'mount': '/',
                                             'size': 'remaining',
                                             'type': 'partition'},
                                            {'file_system': 'ext4',
                                             'mount': '/var',
                                             'size': '4000',
                                             'type': 'partition'}]}]}
        ironic.convert_string_sizes(data)
        mock_converter.assert_has_calls(
            [mock.call('10000 MB'), mock.call('4000')],
            any_order=True)
class TestConvertPercentSizes(unittest2.TestCase):
    """Tests for ironic._resolve_all_sizes(): resolution of absolute,
    percent and 'remaining' sizes across disks and volume groups.
    """

    GRUB = ironic.DEFAULT_GRUB_SIZE
    LVM = ironic.DEFAULT_LVM_META_SIZE

    def _assert_resolved(self, result, desired):
        # NOTE(review): the previous idiom
        #     map(lambda r, d: self.assertDictEqual(r, d), result, desired)
        # stopped asserting on Python 3, where map() is lazy and the
        # returned iterator was never consumed.  Compare explicitly and
        # also verify that no entries are missing or extra.
        self.assertEqual(len(result), len(desired))
        for resolved, expected in zip(result, desired):
            self.assertDictEqual(resolved, expected)

    def test_single_disk_no_percent(self):
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000 * MiB,
             'type': 'disk', 'volumes': [{'size': 5000 * MiB,
                                          'type': 'partition'},
                                         {'size': 4900 * MiB,
                                          'type': 'partition'}]}]
        desired = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000 * MiB,
             'type': 'disk', 'volumes': [{'size': 5000 * MiB,
                                          'type': 'partition'},
                                         {'size': 4900 * MiB,
                                          'type': 'partition'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_percent(self):
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000 * MiB,
             'type': 'disk', 'volumes': [{'size': '50%', 'type': 'partition'},
                                         {'size': 4900 * MiB,
                                          'type': 'partition'}]}]
        desired = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000 * MiB,
             'type': 'disk', 'volumes': [{'size': 5000 * MiB,
                                          'type': 'partition'},
                                         {'size': 4900 * MiB,
                                          'type': 'partition'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_percent_unicode(self):
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000 * MiB,
             'type': 'disk', 'volumes': [{'size': u'50%', 'type': 'partition'},
                                         {'size': 4900 * MiB,
                                          'type': 'partition'}]}]
        desired = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000 * MiB,
             'type': 'disk', 'volumes': [{'size': 5000 * MiB,
                                          'type': 'partition'},
                                         {'size': 4900 * MiB,
                                          'type': 'partition'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_without_size(self):
        # A percent volume cannot be resolved when the disk has no size.
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'},
             'type': 'disk', 'volumes': [{'size': '50%', 'type': 'partition'},
                                         {'size': 4900 * MiB,
                                          'type': 'partition'}]}]
        self.assertRaises(ValueError, ironic._resolve_all_sizes, start_data)

    def test_single_disk_insufficient_size(self):
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000 * MiB,
             'type': 'disk', 'volumes': [{'size': '50%', 'type': 'partition'},
                                         {'size': 6000 * MiB,
                                          'type': 'partition'}]}]
        self.assertRaises(ValueError, ironic._resolve_all_sizes, start_data)

    def test_single_disk_with_vg(self):
        start_data = [{'id': {'type': 'name', 'value': 'vda'},
                       'size': 10000 * MiB,
                       'type': 'disk',
                       'volumes': [{'size': '50%', 'type': 'partition'},
                                   {'size': '49%', 'type': 'pv',
                                    'vg': 'home'}]},
                      {'id': 'home',
                       'type': 'vg',
                       'volumes': [{'file_system': 'ext3',
                                    'mount': '/home',
                                    'name': 'home',
                                    'size': "100%",
                                    'type': 'lv'}]}]
        # The lv gets the whole pv minus the default LVM metadata size.
        desired = [{'id': {'type': 'name', 'value': 'vda'},
                    'size': 10000 * MiB,
                    'type': 'disk',
                    'volumes': [{'size': 5000 * MiB, 'type': 'partition'},
                                {'size': 4900 * MiB, 'type': 'pv',
                                 'vg': 'home'}]},
                   {'id': 'home',
                    'type': 'vg',
                    'volumes': [{'file_system': 'ext3',
                                 'mount': '/home',
                                 'name': 'home',
                                 'size': 4900 * MiB - self.LVM,
                                 'type': 'lv'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_with_vg_insufficient_size(self):
        start_data = [{'id': {'type': 'name', 'value': 'vda'},
                       'size': 10000 * MiB,
                       'type': 'disk',
                       'volumes': [{'size': '50%', 'type': 'partition'},
                                   {'size': '49%', 'type': 'pv',
                                    'vg': 'home'}]},
                      {'id': 'home',
                       'type': 'vg',
                       'volumes': [{'file_system': 'ext3',
                                    'mount': '/home',
                                    'name': 'home',
                                    'size': "60%",
                                    'type': 'lv'},
                                   {'file_system': 'ext3',
                                    'mount': '/media',
                                    'name': 'media',
                                    'size': "60%",
                                    'type': 'lv'}]}]
        self.assertRaises(ValueError, ironic._resolve_all_sizes, start_data)

    def test_single_disk_with_vg_size_more_than_100_percent(self):
        start_data = [{'id': {'type': 'name', 'value': 'vda'},
                       'size': 10000 * MiB,
                       'type': 'disk',
                       'volumes': [{'size': '50%', 'type': 'partition'},
                                   {'size': '49%', 'type': 'pv',
                                    'vg': 'home'}]},
                      {'id': 'home',
                       'type': 'vg',
                       'volumes': [{'file_system': 'ext3',
                                    'mount': '/home',
                                    'name': 'home',
                                    'size': "101%",
                                    'type': 'lv'}]}]
        self.assertRaises(ValueError, ironic._resolve_all_sizes, start_data)

    def test_single_disk_with_vg_lvm_meta_size(self):
        # An explicit lvm_meta_size overrides DEFAULT_LVM_META_SIZE.
        start_data = [{'id': {'type': 'name', 'value': 'vda'},
                       'size': 10000 * MiB,
                       'type': 'disk',
                       'volumes': [{'size': '50%', 'type': 'partition'},
                                   {'size': '49%', 'type': 'pv',
                                    'vg': 'home',
                                    'lvm_meta_size': 49 * MiB}]},
                      {'id': 'home',
                       'type': 'vg',
                       'volumes': [{'file_system': 'ext3',
                                    'mount': '/home',
                                    'name': 'home',
                                    'size': "100%",
                                    'type': 'lv'}]}]
        desired = [{'id': {'type': 'name', 'value': 'vda'},
                    'size': 10000 * MiB,
                    'type': 'disk',
                    'volumes': [{'size': 5000 * MiB, 'type': 'partition'},
                                {'size': 4900 * MiB, 'type': 'pv',
                                 'vg': 'home',
                                 'lvm_meta_size': 49 * MiB}]},
                   {'id': 'home',
                    'type': 'vg',
                    'volumes': [{'file_system': 'ext3',
                                 'mount': '/home',
                                 'name': 'home',
                                 'size': 4900 * MiB - 49 * MiB,
                                 'type': 'lv'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_remaining(self):
        # 'remaining' takes what is left after the grub reservation.
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000 * MiB,
             'type': 'disk',
             'volumes': [{'size': '50%', 'type': 'partition', 'mount': '/'},
                         {'size': 'remaining', 'type': 'partition',
                          'mount': '/home'}]}]
        desired = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000 * MiB,
             'type': 'disk',
             'volumes': [{'size': 5000 * MiB, 'type': 'partition',
                          'mount': '/'},
                         {'size': 5000 * MiB - self.GRUB, 'type': 'partition',
                          'mount': '/home'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_remaining_nothing_left(self):
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000 * MiB,
             'type': 'disk',
             'volumes': [{'size': 10000 * MiB - self.GRUB, 'type': 'partition',
                          'mount': '/'},
                         {'size': 'remaining', 'type': 'partition',
                          'mount': '/home'}]}]
        self.assertRaises(ValueError, ironic._resolve_all_sizes, start_data)

    def test_single_disk_remaining_insufficient_size(self):
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000 * MiB,
             'type': 'disk',
             'volumes': [{'size': 'remaining', 'type': 'partition',
                          'mount': '/'},
                         {'size': 11000 * MiB, 'type': 'partition',
                          'mount': '/home'}]}]
        self.assertRaises(ValueError, ironic._resolve_all_sizes, start_data)

    def test_single_disk_with_lv_remaining(self):
        start_data = [{'id': {'type': 'name', 'value': 'vda'},
                       'size': 10000 * MiB,
                       'type': 'disk',
                       'volumes': [{'mount': '/',
                                    'size': '50%',
                                    'type': 'partition'},
                                   {'size': '49%',
                                    'type': 'pv',
                                    'vg': 'home'}]},
                      {'id': 'home',
                       'type': 'vg',
                       'volumes': [{'mount': '/var',
                                    'size': 'remaining',
                                    'type': 'lv'},
                                   {'mount': '/home',
                                    'size': '30%',
                                    'type': 'lv'}]}]
        # 4900 MiB pv minus default LVM metadata -> 4836 MiB usable.
        desired = [{'id': {'type': 'name', 'value': 'vda'},
                    'size': 10000 * MiB,
                    'type': 'disk',
                    'volumes': [{'mount': '/',
                                 'size': 5000 * MiB,
                                 'type': 'partition'},
                                {'size': 4900 * MiB,
                                 'type': 'pv',
                                 'vg': 'home'}]},
                   {'id': 'home',
                    'type': 'vg',
                    'volumes': [{'mount': '/var',
                                 'size': 4836 * MiB - (int(0.3 * 4836 * MiB)),
                                 'type': 'lv'},
                                {'mount': '/home',
                                 'size': int(0.3 * 4836 * MiB),
                                 'type': 'lv'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_with_pv_and_lv_remaining(self):
        disk_size = 10000 * MiB
        start_data = [{'id': {'type': 'name', 'value': 'vda'},
                       'size': disk_size,
                       'type': 'disk',
                       'volumes': [{'mount': '/',
                                    'size': '50%',
                                    'type': 'partition'},
                                   {'size': 'remaining',
                                    'type': 'pv',
                                    'vg': 'home'}]},
                      {'id': 'home',
                       'type': 'vg',
                       'volumes': [{'mount': '/var',
                                    'size': 'remaining',
                                    'type': 'lv'},
                                   {'mount': '/home',
                                    'size': '30%',
                                    'type': 'lv'}]}]
        expected_partition_size = disk_size * 0.50
        expected_home_pv_size = (disk_size - expected_partition_size -
                                 ironic.DEFAULT_GRUB_SIZE)
        expected_home_lv_size = int((expected_home_pv_size -
                                     ironic.DEFAULT_LVM_META_SIZE) * 0.3)
        expected_var_lv_size = (expected_home_pv_size - expected_home_lv_size -
                                ironic.DEFAULT_LVM_META_SIZE)
        desired = [{'id': {'type': 'name', 'value': 'vda'},
                    'size': disk_size,
                    'type': 'disk',
                    'volumes': [{'mount': '/',
                                 'size': expected_partition_size,
                                 'type': 'partition'},
                                {'size': expected_home_pv_size,
                                 'type': 'pv',
                                 'vg': 'home'}]},
                   {'id': 'home',
                    'type': 'vg',
                    'volumes': [{'mount': '/var',
                                 'size': expected_var_lv_size,
                                 'type': 'lv'},
                                {'mount': '/home',
                                 'size': expected_home_lv_size,
                                 'type': 'lv'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_multiple_remaining(self):
        # Only one 'remaining' volume per disk is allowed.
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000 * MiB,
             'type': 'disk',
             'volumes': [{'size': 'remaining', 'type': 'partition',
                          'mount': '/'},
                         {'size': 'remaining', 'type': 'partition',
                          'mount': '/home'}]}]
        self.assertRaises(ValueError, ironic._resolve_all_sizes, start_data)

    def test_single_disk_with_vg_reverse_order(self):
        # vg listed before the disk that hosts its pv must still resolve.
        start_data = [{'id': 'home',
                       'type': 'vg',
                       'volumes': [{'file_system': 'ext3',
                                    'mount': '/home',
                                    'name': 'home',
                                    'size': "100%",
                                    'type': 'lv'}]},
                      {'id': {'type': 'name', 'value': 'vda'},
                       'size': 10000 * MiB,
                       'type': 'disk',
                       'volumes': [{'size': '50%', 'type': 'partition'},
                                   {'size': '49%', 'type': 'pv',
                                    'vg': 'home'}]}]
        desired = [{'id': {'type': 'name', 'value': 'vda'},
                    'size': 10000 * MiB,
                    'type': 'disk',
                    'volumes': [{'size': 5000 * MiB, 'type': 'partition'},
                                {'size': 4900 * MiB, 'type': 'pv',
                                 'vg': 'home'}]},
                   {'id': 'home',
                    'type': 'vg',
                    'volumes': [{'file_system': 'ext3',
                                 'mount': '/home',
                                 'name': 'home',
                                 'size': 4900 * MiB - self.LVM,
                                 'type': 'lv'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_with_vg_multiple_pv(self):
        # lv percent sizes are taken from the sum of all pvs of the vg.
        start_data = [{'id': {'type': 'name', 'value': 'vda'},
                       'size': 10000 * MiB,
                       'type': 'disk',
                       'volumes': [
                           {'size': 7000 * MiB, 'type': 'pv', 'vg': 'home'}]},
                      {'id': {'type': 'name', 'value': 'vdb'},
                       'size': 5000 * MiB,
                       'type': 'disk',
                       'volumes': [
                           {'size': 4000 * MiB, 'type': 'pv', 'vg': 'home'}]},
                      {'id': 'home',
                       'type': 'vg',
                       'volumes': [{'file_system': 'ext3',
                                    'mount': '/home',
                                    'name': 'home',
                                    'size': '50%',
                                    'type': 'lv'}]}]
        desired = [{'id': {'type': 'name', 'value': 'vda'},
                    'size': 10000 * MiB,
                    'type': 'disk',
                    'volumes': [{'size': 7000 * MiB, 'type': 'pv',
                                 'vg': 'home'}]},
                   {'id': {'type': 'name', 'value': 'vdb'},
                    'size': 5000 * MiB,
                    'type': 'disk',
                    'volumes': [{'size': 4000 * MiB, 'type': 'pv',
                                 'vg': 'home'}]},
                   {'id': 'home',
                    'type': 'vg',
                    'volumes': [{'file_system': 'ext3',
                                 'mount': '/home',
                                 'name': 'home',
                                 'size': 5500 * MiB - self.LVM,
                                 'type': 'lv'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)
class TestProcessPartition(IronicTestAbstract):
    """Tests for Ironic._process_partition()."""

    def setUp(self):
        super(TestProcessPartition, self).setUp()
        # Stub out the collaborators so only _process_partition logic runs.
        self.driver._partition_data = self.mock_part_data = mock.MagicMock()
        self.driver._add_partition = self.mock_add_part = mock.MagicMock()
        self.mock_add_part.return_value = self.mock_part = mock.MagicMock()
        self.driver.get_os_ids = self.mock_get_os_ids = mock.MagicMock()
        self.driver.get_image_ids = self.mock_get_image_ids = mock.MagicMock()

    def test_with_partition_guid(self):
        # A 'partition_guid' in the volume is applied to the new partition.
        mock_volume = {'partition_guid': 'test_guid'}
        self.driver._process_partition(mock_volume, None, None, None)
        self.mock_part.set_guid.assert_called_once_with('test_guid')

    def test_no_mount_option(self):
        # A volume without 'mount' must not register any filesystem.
        # NOTE(review): the old test asserted call_count on a MagicMock
        # that was never passed to the driver, so it could never fail.
        # Assert on a real PartitionScheme instead.
        mock_volume = {}
        part_schema = ironic.objects.PartitionScheme()
        parted = part_schema.add_parted(name='test_parted', label='gpt')
        self.driver._process_partition(mock_volume, None, parted,
                                       part_schema)
        self.assertEqual(len(part_schema.fss), 0)

    def test_none_mount_option(self):
        # mount == 'none' is treated the same as no mount at all.
        mock_volume = {'mount': 'none'}
        part_schema = ironic.objects.PartitionScheme()
        parted = part_schema.add_parted(name='test_parted', label='gpt')
        self.driver._process_partition(mock_volume, None, parted,
                                       part_schema)
        self.assertEqual(len(part_schema.fss), 0)

    def test_non_boot_volume_non_default(self):
        # Explicit fs attributes must be propagated into the fs object.
        mock_volume = {'mount': '/', 'file_system': 'ext4',
                       'fstab_options': 'noatime', 'fstab_enabled': False,
                       'disk_label': 'test_label'}
        part_schema = ironic.objects.PartitionScheme()
        parted = part_schema.add_parted(name='test_parted', label='gpt')
        self.driver._process_partition(mock_volume, None, parted,
                                       part_schema)
        self.assertEqual(len(part_schema.fss), 1)
        fs = part_schema.fss[0]
        self.assertEqual(fs.type, 'ext4')
        self.assertEqual(fs.label, ' -L test_label ')
        self.assertEqual(fs.fstab_options, 'noatime')
        self.assertEqual(fs.fstab_enabled, False)
        self.assertEqual(fs.mount, '/')
        self.assertFalse(self.driver._boot_done)

    def test_non_boot_volume_default(self):
        # Defaults: xfs, empty label, 'defaults' options, fstab enabled.
        mock_volume = {'mount': '/'}
        part_schema = ironic.objects.PartitionScheme()
        parted = part_schema.add_parted(name='test_parted', label='gpt')
        self.driver._process_partition(mock_volume, None, parted,
                                       part_schema)
        self.assertEqual(len(part_schema.fss), 1)
        fs = part_schema.fss[0]
        self.assertEqual(fs.type, 'xfs')
        self.assertEqual(fs.label, '')
        self.assertEqual(fs.fstab_options, 'defaults')
        self.assertEqual(fs.fstab_enabled, True)
        self.assertEqual(fs.mount, '/')
        self.assertFalse(self.driver._boot_done)

    def test_already_boot_volume(self):
        # A second /boot volume leaves the _boot_done flag set.
        mock_volume = {'mount': '/boot'}
        self.driver._boot_done = True
        self.driver._process_partition(mock_volume, None, mock.MagicMock(),
                                       mock.MagicMock())
        self.assertTrue(self.driver._boot_done)

    def test_boot_volume(self):
        # The first /boot volume sets the _boot_done flag.
        mock_volume = {'mount': '/boot'}
        self.driver._process_partition(mock_volume, None, mock.MagicMock(),
                                       mock.MagicMock())
        self.assertTrue(self.driver._boot_done)

View File

@ -547,19 +547,6 @@ class GetIPTestCase(unittest2.TestCase):
mock_execute.assert_called_once_with(*self.cmd)
class TestHumantoBytesConverter(unittest2.TestCase):
    """Unit handling of utils.human2bytes."""

    def test_default_convertion(self):
        # A bare number falls back to the 'default' unit (GiB -> MiB).
        converted = utils.human2bytes('1000', default='GiB')
        self.assertEqual(1024000, converted)

    def test_target_convertion(self):
        # Result is expressed in the requested 'target' unit.
        converted = utils.human2bytes('1024 MiB', target='GiB')
        self.assertEqual(1, converted)

    def test_invalid_data(self):
        self.assertRaises(ValueError, utils.human2bytes, 'invalid data')
class ParseKernelCmdline(unittest2.TestCase):
def test_parse_kernel_cmdline(self):

View File

@ -477,28 +477,6 @@ def text_diff(text1, text2, sfrom="from", sto="to"):
return "\n".join(diff)
def human2bytes(value, default='MiB', target='MiB'):
    """Convert a human readable size string into *target* units.

    Examples: '10' (uses *default* unit), '5 GB', '512MiB'.
    Decimal (SI) suffixes step by 1000, IEC suffixes by 1024; at most one
    space may separate the number from the suffix.

    :param value: size string to convert
    :param default: unit assumed when *value* has no suffix
    :param target: unit of the returned value
    :returns: integer size expressed in *target* units
    :raises ValueError: if *value* cannot be parsed or a unit is unknown
    """
    symbols = {'custom': ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'),
               'iec': ('KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB')}
    # Multiplier per unit.  Renamed from 'bytes' to stop shadowing the
    # builtin of the same name.
    units = {}
    units.update({e: 1000.0 ** n for n, e in enumerate(symbols['custom'])})
    units.update({e: 1024.0 ** n for n, e in enumerate(symbols['iec'], 1)})
    try:
        number = ''
        prefix = default
        for index, letter in enumerate(value):
            if letter.isdigit() or letter == '.':
                number += letter
            else:
                # Skip a single separating space before the unit suffix.
                if value[index] == ' ':
                    index += 1
                prefix = value[index:]
                break
        return int(float(number) * units[prefix] / units[target])
    except Exception as ex:
        # Normalize any parse/lookup failure into ValueError.
        raise ValueError('Can\'t convert size %s. Error: %s' % (value, ex))
def list_opts():
"""Returns a list of oslo.config options available in the library.