Contribute core Cray changes to Bareon

This is a contribution of features made in the scope of Cray
Bareon adoption. Even though changes affect a lot of code,
they are not breaking. To prove that, we have created a functional
test that covers existing nailgun deployment flow
(see /tests_functional/test_nailgun.py)

We have made Manager a deploy_driver. Current nailgun's manager
has been moved to nailgun deploy driver, which is default. Most of
the changes made in the scope of the project are enclosed within
Ironic data driver and Ironic (swift & rsync) deploy drivers.

To make this review easier I propose the following order:
Review changes to existing code:
- review changes to object model
- review changes related to splitting deploy driver / base drivers
- review changes to common utils
Review the new code:
- review Ironic data driver
- review Ironic deploy drivers

Change-Id: Id2d32a7574e6fcafee09490c39fb114c80407db7
Implements: blueprint size-unit-conversion-and-relative-sizing
Implements: blueprint policy-based-partitioning
Implements: blueprint multi-image-deployment
Implements: blueprint rsync-image-deployment
This commit is contained in:
Mark Goddard 2016-02-26 19:23:28 +02:00 committed by Oleksandr Berezovskyi
parent 2eed19e922
commit 8fe4fff91f
50 changed files with 5883 additions and 592 deletions

View File

@ -114,7 +114,7 @@ This folder contains the python code: drivers, objects, unit tests and utils, ma
- bareon/cmd/agent.py
* That is where executable entry points are. It reads input data and
instantiates Manager class with these data.
instantiates DeployDriver class with these data.
- bareon/manager.py
* That is the file where the top level agent logic is implemented.
It contains all those methods which do something useful (do_*)

View File

@ -18,8 +18,8 @@ from oslo_config import cfg
import six
import yaml
from bareon import manager as manager
from bareon.openstack.common import log as logging
from bareon.utils import utils
from bareon import version
cli_opts = [
@ -33,6 +33,26 @@ cli_opts = [
default='',
help='Input data (json string)'
),
cfg.StrOpt(
'data_driver',
default='nailgun',
help='Data driver'
),
cfg.StrOpt(
'deploy_driver',
default='nailgun',
help='Deploy driver'
),
cfg.StrOpt(
'image_build_dir',
default='/tmp',
help='Directory where the image is supposed to be built',
),
cfg.StrOpt(
'config_drive_path',
default='/tmp/config-drive.img',
help='Path where to store generated config drive image',
),
]
CONF = cfg.CONF
@ -115,10 +135,15 @@ def main(actions=None):
data = yaml.safe_load(f)
LOG.debug('Input data: %s', data)
mgr = manager.Manager(data)
data_driver_class = utils.get_data_driver(CONF.data_driver)
data_driver = data_driver_class(data)
deploy_driver_class = utils.get_deploy_driver(CONF.deploy_driver)
deploy_driver = deploy_driver_class(data_driver)
if actions:
for action in actions:
getattr(mgr, action)()
getattr(deploy_driver, action)()
except Exception as exc:
handle_exception(exc)

View File

View File

@ -37,6 +37,10 @@ class PartitioningDataDriverMixin(object):
def partition_scheme(self):
"""Retruns instance of PartionScheme object"""
@abc.abstractproperty
def hw_partition_scheme(self):
"""Returns instance of PartitionSchema object"""
@six.add_metaclass(abc.ABCMeta)
class ProvisioningDataDriverMixin(object):
@ -45,6 +49,10 @@ class ProvisioningDataDriverMixin(object):
def image_scheme(self):
"""Returns instance of ImageScheme object"""
@abc.abstractproperty
def image_meta(self):
"""Returns image_meta dictionary"""
@abc.abstractproperty
def operating_system(self):
"""Returns instance of OperatingSystem object"""
@ -64,3 +72,11 @@ class GrubBootloaderDataDriverMixin(object):
@abc.abstractproperty
def grub(self):
"""Returns instance of Grub object"""
@six.add_metaclass(abc.ABCMeta)
class MultibootDeploymentMixin(object):
@abc.abstractmethod
def get_os_ids(self):
pass

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from bareon.drivers import nailgun
from bareon.drivers.data import nailgun
from bareon.objects import base

View File

@ -0,0 +1,173 @@
#
# Copyright 2016 Cray Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from oslo_config import cfg
from bareon import errors
from bareon.utils import utils
from bareon.drivers.data.base import BaseDataDriver
from bareon.drivers.data.base import ConfigDriveDataDriverMixin
from bareon.drivers.data.base import GrubBootloaderDataDriverMixin
from bareon.drivers.data.base import MultibootDeploymentMixin
from bareon.drivers.data.base import PartitioningDataDriverMixin
from bareon.drivers.data.base import ProvisioningDataDriverMixin
opts = [
cfg.StrOpt(
'config_drive_path',
default='/tmp/config-drive.img',
help='Path where to store generated config drive image',
),
]
CONF = cfg.CONF
CONF.register_opts(opts)
# TODO(lobur): This driver mostly copies nailgun driver. Need to merge them.
class GenericDataDriver(BaseDataDriver,
                        PartitioningDataDriverMixin,
                        ProvisioningDataDriverMixin,
                        ConfigDriveDataDriverMixin,
                        GrubBootloaderDataDriverMixin,
                        MultibootDeploymentMixin):
    """Template data driver: lazily builds and caches deployment objects.

    Concrete drivers override the ``_get_*`` hooks below; every public
    property computes its value once via the matching hook and memoizes
    it on the instance (``_partition_scheme``, ``_image_scheme``, ...).
    """

    def __init__(self, data):
        super(GenericDataDriver, self).__init__(data)
        # this var states whether boot partition
        # was already allocated on first matching volume
        # or not
        self._boot_partition_done = False
        # this var is used as a flag that /boot fs
        # has already been added. we need this to
        # get rid of md over all disks for /boot partition.
        self._boot_done = False

    @property
    def partition_scheme(self):
        # Lazily built via _get_partition_scheme() and cached.
        if not hasattr(self, '_partition_scheme'):
            self._partition_scheme = self._get_partition_scheme()
        return self._partition_scheme

    @property
    def hw_partition_scheme(self):
        # Hook for drivers that can read the scheme from real hardware.
        raise NotImplementedError

    @property
    def partitions_policy(self):
        """Returns string"""
        raise NotImplementedError

    @property
    def image_scheme(self):
        if not hasattr(self, '_image_scheme'):
            self._image_scheme = self._get_image_scheme()
        return self._image_scheme

    @property
    def image_meta(self):
        if not hasattr(self, '_image_meta'):
            self._image_meta = self._get_image_meta()
        return self._image_meta

    @property
    def grub(self):
        if not hasattr(self, '_grub'):
            self._grub = self._get_grub()
        return self._grub

    @property
    def operating_system(self):
        if not hasattr(self, '_operating_system'):
            self._operating_system = self._get_operating_system()
        return self._operating_system

    @property
    def configdrive_scheme(self):
        if not hasattr(self, '_configdrive_scheme'):
            self._configdrive_scheme = self._get_configdrive_scheme()
        return self._configdrive_scheme

    @property
    def is_configdrive_needed(self):
        # Concrete drivers decide whether a config drive must be built.
        raise NotImplementedError

    def create_configdrive(self):
        # No-op when the driver says a config drive is not needed.
        if self.is_configdrive_needed:
            self._create_configdrive()

    def _get_partition_scheme(self):
        raise NotImplementedError

    def _get_image_scheme(self):
        raise NotImplementedError

    def _get_image_meta(self):
        raise NotImplementedError

    def _get_grub(self):
        raise NotImplementedError

    def _get_operating_system(self):
        raise NotImplementedError

    def _get_configdrive_scheme(self):
        raise NotImplementedError

    def _create_configdrive(self):
        raise NotImplementedError

    def _add_configdrive_image(self):
        # Register the generated config-drive image on the partition the
        # partition scheme marked as the configdrive device.
        configdrive_device = self.partition_scheme.configdrive_device()
        if configdrive_device is None:
            raise errors.WrongPartitionSchemeError(
                'Error while trying to get configdrive device: '
                'configdrive device not found')
        size = os.path.getsize(CONF.config_drive_path)
        md5 = utils.calculate_md5(CONF.config_drive_path, size)
        self.image_scheme.add_image(
            uri='file://%s' % CONF.config_drive_path,
            target_device=configdrive_device,
            format='iso9660',
            container='raw',
            size=size,
            md5=md5,
        )

    @property
    def _ks_disks(self):
        # NOTE(review): py2-style filter() — returns a list on py2 but a
        # one-shot iterator on py3; confirm target runtime before reuse.
        return filter(lambda x: x['type'] == 'disk' and x['size'] > 0,
                      self._partition_data())

    @property
    def _ks_vgs(self):
        return filter(lambda x: x['type'] == 'vg', self._partition_data())

    def _getlabel(self, label):
        # Returns a mkfs label argument fragment, or '' when no label.
        if not label:
            return ''
        # XFS will refuse to format a partition if the
        # disk label is > 12 characters.
        return ' -L {0} '.format(label[:12])

    @property
    def _small_ks_disks(self):
        """Get those disks which are smaller than 2T"""
        return [d for d in self._ks_disks if d['size'] <= 2 * 1024 * 1024]

    def get_os_ids(self):
        raise NotImplementedError

View File

@ -0,0 +1,754 @@
#
# Copyright 2016 Cray Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import fnmatch
import math
import os
from oslo_config import cfg
from bareon.drivers.data.generic import GenericDataDriver
from bareon.drivers.data import ks_spaces_validator
from bareon import errors
from bareon import objects
from bareon.openstack.common import log as logging
from bareon.utils import hardware as hu
from bareon.utils import partition as pu
from bareon.utils import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
DEFAULT_LVM_META_SIZE = 64
DEFAULT_GRUB_SIZE = 24
class Ironic(GenericDataDriver):
    """Data driver for Ironic-based deployments.

    Parses the deploy_config ('images', 'partitions', 'deploy_data')
    and builds image/partition/grub schemes for the deploy drivers.
    """

    def __init__(self, data):
        super(Ironic, self).__init__(data)
        # Populated while the partition scheme is built (_process_vg);
        # hold the "vg/lv" path when / or /boot live on LVM.
        self._root_on_lvm = None
        self._boot_on_lvm = None

    def _get_image_meta(self):
        # Ironic deploy_config carries no image metadata, so the
        # image_meta property resolves to None.
        pass

    def _get_image_scheme(self):
        """Build an ImageScheme from the 'images' section of the data."""
        LOG.debug('--- Preparing image schema ---')
        data = self.data
        image_schema = objects.ImageScheme()
        image_list = data['images']
        # One set of deploy flags is shared by all images.
        deployment_flags = data.get('image_deploy_flags', {})
        image_schema.images = [objects.Image(uri=image['image_pull_url'],
                                             target_device=image['target'],
                                             format='bare',
                                             container='raw',
                                             os_id=image['name'],
                                             os_boot=image.get('boot', False),
                                             image_name=image.get('image_name',
                                                                  ''),
                                             image_uuid=image.get('image_uuid',
                                                                  ''),
                                             deployment_flags=deployment_flags)
                               for image in image_list]
        return image_schema
def get_os_ids(self):
images = set([image.os_id for image in self.image_scheme.images])
partitions = set([id
for fs in self.partition_scheme.fss
for id in fs.os_id])
return images & partitions
def get_image_ids(self):
return [image.os_id for image in self.image_scheme.images]
@property
def is_multiboot(self):
return True if len(self.get_image_ids()) > 1 else False
    @property
    def is_configdrive_needed(self):
        # The Ironic flow never builds a config drive in the agent.
        return False

    @property
    def hw_partition_scheme(self):
        # Lazily read from the node's real hardware and cached.
        if not hasattr(self, '_hw_partition_scheme'):
            self._hw_partition_scheme = self._get_hw_partition_schema()
        return self._hw_partition_scheme

    @property
    def partitions_policy(self):
        """Returns string"""
        # Defaults to 'verify' when deploy_config does not specify one.
        if not hasattr(self, '_partitions_policy'):
            self._partitions_policy = self.data.get('partitions_policy',
                                                    'verify')
        return self._partitions_policy

    @property
    def root_on_lvm(self):
        # "vg/lv" path of the root LV, or falsy when / is not on LVM.
        return self.partition_scheme and self._root_on_lvm

    @property
    def boot_on_lvm(self):
        # /boot is on LVM either explicitly, or implicitly when there is
        # no separate /boot and root itself lives on LVM.
        no_separate_boot = (self.partition_scheme.fs_by_mount('/boot') is None)
        return ((no_separate_boot and self.root_on_lvm) or
                self._boot_on_lvm)

    def _partition_data(self):
        # Raw 'partitions' section of the deploy_config.
        return self.data['partitions']
    def _get_partition_scheme(self):
        """Reads disk/partitions volumes/vgs from given deploy_config

        Translating different ids (name, path, scsi) to name via
        scanning/comparing the underlying node hardware.
        """
        LOG.debug('--- Preparing partition scheme ---')
        # TODO(oberezovskyi): make validator work
        data = self._partition_data()
        ks_spaces_validator.validate(data, 'ironic')
        # Normalize human-readable sizes ('10 GB', '40%', 'remaining')
        # into absolute MiB values.
        data = convert_size(data)
        partition_schema = objects.PartitionScheme()
        multiboot_installed = False
        LOG.debug('Looping over all disks in provision data')
        for disk in self._ks_disks:
            # # skipping disk if there are no volumes with size >0
            # # to be allocated on it which are not boot partitions
            if all((v["size"] <= 0 for v in disk["volumes"] if
                    v.get("mount") != "/boot")):
                continue
            LOG.debug('Processing disk type:%s id:%s' % (
                disk['id']['type'], disk['id']['value']))
            LOG.debug('Adding gpt table on disk type:%s id:%s' % (
                disk['id']['type'], disk['id']['value']))
            parted = partition_schema.add_parted(
                name=self._disk_dev(disk), label='gpt', disk_size=disk['size'])
            # TODO(lobur): do not add partitions implicitly, they may fail
            # partition verification
            # A stage-1.5 GRUB (bios_grub) partition is always created first.
            parted.add_partition(size=DEFAULT_GRUB_SIZE, flags=['bios_grub'])
            # When several images are deployed, one shared 100 MiB
            # 'multiboot' partition is created on the first suitable disk.
            if self.is_multiboot and not multiboot_installed:
                multiboot_installed = True
                multiboot_partition = parted.add_partition(size=100)
                partition_schema.add_fs(device=multiboot_partition.name,
                                        mount='multiboot', fs_type='ext4',
                                        fstab_enabled=False, os_id=[])
            LOG.debug('Looping over all volumes on disk type:%s id:%s' % (
                disk['id']['type'], disk['id']['value']))
            for volume in disk['volumes']:
                LOG.debug('Processing volume: '
                          'name=%s type=%s size=%s mount=%s vg=%s '
                          'keep_data=%s' %
                          (volume.get('name'), volume.get('type'),
                           volume.get('size'), volume.get('mount'),
                           volume.get('vg'), volume.get('keep_data')))
                if volume['size'] <= 0:
                    LOG.debug('Volume size is zero. Skipping.')
                    continue
                # Dispatch on volume type; an unknown type raises KeyError.
                FUNC_MAP = {
                    'partition': self._process_partition,
                    'raid': self._process_raid,
                    'pv': self._process_pv
                }
                FUNC_MAP[volume['type']](volume, disk, parted,
                                         partition_schema)
        LOG.debug('Looping over all volume groups in provision data')
        for vg in self._ks_vgs:
            self._process_vg(vg, partition_schema)
        # Propagate keep_data flags up from fs/lv/pv level.
        partition_schema.elevate_keep_data()
        return partition_schema
    def _process_partition(self, volume, disk, parted, partition_schema):
        """Create a plain partition and (optionally) a filesystem on it."""
        partition = self._add_partition(volume, disk, parted)
        if 'partition_guid' in volume:
            LOG.debug('Setting partition GUID: %s' %
                      volume['partition_guid'])
            partition.set_guid(volume['partition_guid'])
        if 'mount' in volume and volume['mount'] != 'none':
            LOG.debug('Adding file system on partition: '
                      'mount=%s type=%s' %
                      (volume['mount'],
                       volume.get('file_system', 'xfs')))
            partition_schema.add_fs(
                device=partition.name, mount=volume['mount'],
                fs_type=volume.get('file_system', 'xfs'),
                fs_label=self._getlabel(volume.get('disk_label')),
                fstab_options=volume.get('fstab_options', 'defaults'),
                fstab_enabled=volume.get('fstab_enabled', True),
                # By default the fs belongs to the first deployed image.
                os_id=volume.get('images', self.get_image_ids()[:1]),
            )
            parted.install_bootloader = True
            if volume['mount'] == '/boot' and not self._boot_done:
                self._boot_done = True

    def _process_pv(self, volume, disk, parted, partition_schema):
        """Create a partition and attach it to a volume group as a PV."""
        partition = self._add_partition(volume, disk, parted)
        LOG.debug('Creating pv on partition: pv=%s vg=%s' %
                  (partition.name, volume['vg']))
        lvm_meta_size = volume.get('lvm_meta_size', DEFAULT_LVM_META_SIZE)
        # The reason for that is to make sure that
        # there will be enough space for creating logical volumes.
        # Default lvm extension size is 4M. Nailgun volume
        # manager does not care of it and if physical volume size
        # is 4M * N + 3M and lvm metadata size is 4M * L then only
        # 4M * (N-L) + 3M of space will be available for
        # creating logical extensions. So only 4M * (N-L) of space
        # will be available for logical volumes, while nailgun
        # volume manager might require 4M * (N-L) + 3M
        # logical volume. Besides, parted aligns partitions
        # according to its own algorithm and actual partition might
        # be a bit smaller than integer number of mebibytes.
        if lvm_meta_size < 10:
            raise errors.WrongPartitionSchemeError(
                'Error while creating physical volume: '
                'lvm metadata size is too small')
        metadatasize = int(math.floor((lvm_meta_size - 8) / 2))
        metadatacopies = 2
        partition_schema.vg_attach_by_name(
            pvname=partition.name, vgname=volume['vg'],
            metadatasize=metadatasize,
            metadatacopies=metadatacopies)

    def _process_raid(self, volume, disk, parted, partition_schema):
        """Create a partition and attach it to a software RAID."""
        partition = self._add_partition(volume, disk, parted)
        if not partition:
            # /boot may legitimately be skipped (see _add_partition).
            return
        if 'mount' in volume and volume['mount'] not in ('none', '/boot'):
            LOG.debug('Attaching partition to RAID '
                      'by its mount point %s' % volume['mount'])
            partition_schema.md_attach_by_mount(
                device=partition.name, mount=volume['mount'],
                fs_type=volume.get('file_system', 'xfs'),
                fs_label=self._getlabel(volume.get('disk_label')))
        if 'mount' in volume and volume['mount'] == '/boot' and \
                not self._boot_done:
            # The first /boot member also carries the filesystem.
            LOG.debug('Adding file system on partition: '
                      'mount=%s type=%s' %
                      (volume['mount'],
                       volume.get('file_system', 'ext2')))
            partition_schema.add_fs(
                device=partition.name, mount=volume['mount'],
                fs_type=volume.get('file_system', 'ext2'),
                fs_label=self._getlabel(volume.get('disk_label')),
                fstab_options=volume.get('fstab_options', 'defaults'),
                fstab_enabled=volume.get('fstab_enabled', True),
                os_id=volume.get('images', self.get_image_ids()[:1]),
            )
            parted.install_bootloader = True
            self._boot_done = True

    def _process_vg(self, volume_group, partition_schema):
        """Create LVs (and their filesystems) for one volume group."""
        LOG.debug('Processing vg %s' % volume_group['id'])
        LOG.debug(
            'Looping over all logical volumes in vg %s' % volume_group['id'])
        for volume in volume_group['volumes']:
            LOG.debug('Processing lv %s' % volume['name'])
            if volume['size'] <= 0:
                LOG.debug('LogicalVolume size is zero. Skipping.')
                continue
            if volume['type'] == 'lv':
                LOG.debug('Adding lv to vg %s: name=%s, size=%s' %
                          (volume_group['id'], volume['name'], volume['size']))
                lv = partition_schema.add_lv(name=volume['name'],
                                             vgname=volume_group['id'],
                                             size=volume['size'])
                if 'mount' in volume and volume['mount'] != 'none':
                    LOG.debug('Adding file system on lv: '
                              'mount=%s type=%s' %
                              (volume['mount'],
                               volume.get('file_system', 'xfs')))
                    partition_schema.add_fs(
                        device=lv.device_name,
                        mount=volume['mount'],
                        fs_type=volume.get('file_system', 'xfs'),
                        fs_label=self._getlabel(volume.get('disk_label')),
                        fstab_options=volume.get('fstab_options',
                                                 'defaults'),
                        fstab_enabled=volume.get('fstab_enabled', True),
                        os_id=volume.get('images', self.get_image_ids()[:1]),
                    )
                # Remember whether / or /boot ended up on LVM.
                lv_path = "%s/%s" % (volume_group['id'], volume['name'])
                if volume['mount'] == '/':
                    self._root_on_lvm = lv_path
                elif volume['mount'] == '/boot':
                    self._boot_on_lvm = lv_path

    def _add_partition(self, volume, disk, parted):
        """Add a partition for a volume; skip redundant /boot copies.

        Returns the new partition, or None when nothing was created.
        """
        partition = None
        if volume.get('mount') != '/boot':
            LOG.debug('Adding partition on disk %s: size=%s' % (
                disk['id']['value'], volume['size']))
            partition = parted.add_partition(
                size=volume['size'],
                keep_data=self._get_keep_data_flag(volume))
            LOG.debug('Partition name: %s' % partition.name)
        elif volume.get('mount') == '/boot' \
                and not self._boot_partition_done \
                and (disk in self._small_ks_disks or not self._small_ks_disks):
            # NOTE(kozhukalov): On some hardware GRUB is not able
            # to see disks larger than 2T due to firmware bugs,
            # so we'd better avoid placing /boot on such
            # huge disks if it is possible.
            LOG.debug('Adding /boot partition on disk %s: '
                      'size=%s', disk['id']['value'], volume['size'])
            partition = parted.add_partition(
                size=volume['size'],
                keep_data=self._get_keep_data_flag(volume))
            LOG.debug('Partition name: %s', partition.name)
            self._boot_partition_done = True
        else:
            LOG.debug('No need to create partition on disk %s. '
                      'Skipping.', disk['id']['value'])
        return partition

    def _get_keep_data_flag(self, volume):
        # For the new policy-based driver the default is True
        return volume.get('keep_data', True)
    def _get_hw_partition_schema(self):
        """Reads disks/partitions from underlying hardware.

        Does not rely on deploy_config
        """
        # NOTE(lobur): Only disks/partitions currently supported.
        # No vgs/volumes
        LOG.debug('--- Reading HW partition scheme from the node ---')
        partition_schema = objects.PartitionScheme()
        disk_infos = [pu.info(disk['name']) for disk in self.hu_disks]
        # fstab of the deployed root fs maps partitions to mount points.
        fstab = self._find_hw_fstab()
        LOG.debug('Scanning all disks on the node')
        for disk_info in disk_infos:
            parted = partition_schema.add_parted(
                name=disk_info['generic']['dev'],
                label=disk_info['generic']['table'],
                install_bootloader=disk_info['generic']['has_bootloader']
            )
            LOG.debug('Scanning all partitions on disk %s '
                      % disk_info['generic']['dev'])
            for part in disk_info['parts']:
                if part.get('fstype', '') == 'free':
                    LOG.debug('Skipping a free partition at:'
                              'begin=%s, end=%s' %
                              (part.get('begin'), part.get('end')))
                    continue
                LOG.debug('Adding partition: '
                          'name=%s size=%s to hw schema' %
                          (part.get('disk_dev'), part.get('size')))
                # NOTE(lobur): avoid use of parted.add_partition to omit
                # counting logic; use real data instead.
                partition = objects.Partition(
                    name=part.get('name'),
                    count=part.get('num'),
                    device=part.get('disk_dev'),
                    begin=part.get('begin'),
                    end=part.get('end'),
                    partition_type=part.get('type'),
                    flags=part.get('flags')
                )
                parted.partitions.append(partition)
                mnt_point = self._get_mount_point_from_fstab(fstab,
                                                             part['uuid'])
                if mnt_point:
                    LOG.debug('Adding filesystem: '
                              'device=%s fs_type=%s mount_point=%s '
                              'to hw schema' %
                              (part.get('name'), part.get('fstype'),
                               mnt_point))
                    partition_schema.add_fs(device=part.get('name'),
                                            mount=mnt_point,
                                            fs_type=part.get('fstype', ''))
                else:
                    LOG.warning("Not adding %s filesystem to hw_schema because"
                                " it has no mount point in fstab"
                                % part.get('name'))
        return partition_schema

    def _find_hw_fstab(self):
        """Mount each root fs of the partition scheme and read its fstab.

        :returns: concatenated fstab contents
        :raises HardwarePartitionSchemeCannotBeReadError: when a root
            partition cannot be mounted or read
        """
        mount_dir = '/mnt'
        fstabs = []
        # NOTE(review): py2 filter() returns a list; confirm runtime
        # before porting to py3.
        fstab_fss = filter(lambda fss: fss.mount == '/',
                           self.partition_scheme.fss)
        for fss in fstab_fss:
            fss_dev = fss.device
            fstab_path = os.path.join(mount_dir, 'etc', 'fstab')
            try:
                utils.execute('mount', fss_dev, mount_dir, run_as_root=True,
                              check_exit_code=[0])
                fstab, _ = utils.execute('cat', fstab_path,
                                         run_as_root=True,
                                         check_exit_code=[0])
                utils.execute('umount', mount_dir, run_as_root=True)
            except errors.ProcessExecutionError as e:
                raise errors.HardwarePartitionSchemeCannotBeReadError(
                    "Cannot read fstab from %s partition. Error occurred: %s"
                    % (fss_dev, str(e))
                )
            LOG.info("fstab has been found on %s:\n%s" % (fss_dev, fstab))
            fstabs.append(fstab)
        return '\n'.join(fstabs)
def _get_mount_point_from_fstab(self, fstab, part_uuid):
res = None
if not part_uuid:
return res
for line in fstab.splitlines():
# TODO(lobur): handle fstab written using not partition UUID
if part_uuid in line:
res = line.split()[1]
break
return res
    def _match_device(self, hu_disk, ks_disk):
        """Check if hu_disk and ks_disk are the same device

        Tries to figure out if hu_disk got from hu.list_block_devices
        and ks_spaces_disk given correspond to the same disk device. This
        is the simplified version of hu.match_device

        :param hu_disk: A dict representing disk device how
        it is given by list_block_devices method.

        :param ks_disk: A dict representing disk device according to
        ks_spaces format.

        :returns: True if hu_disk matches ks_spaces_disk else False.
        """
        id_type = ks_disk['id']['type']
        id_value = ks_disk['id']['value']
        if isinstance(hu_disk.get(id_type), (list, tuple)):
            # Multi-valued ids (e.g. by-id/by-path links): substring
            # match against any of them.
            return any((id_value in value for value in hu_disk[id_type]))
        else:
            return id_value in hu_disk[id_type]
    @property
    def hu_disks(self):
        """Actual disks which are available on this node

        It is a list of dicts which are formatted other way than
        ks_spaces disks. To match both of those formats use
        _match_device method.
        """
        # Scanned once and cached on the instance.
        if not getattr(self, '_hu_disks', None):
            self._hu_disks = self._get_block_devices()
        return self._hu_disks

    @property
    def hu_vgs(self):
        """Actual volume groups which are available on this node

        It is a list of dicts which are formatted other way than
        ks_spaces disks. To match both of those formats use
        _match_data_by_pattern method.
        """
        if not getattr(self, '_hu_vgs', None):
            self._hu_vgs = self._get_vg_devices()
        return self._hu_vgs
def _get_vg_devices(self):
devices = hu.get_vg_devices_from_udev_db()
vg_dev_infos = []
for device in devices:
vg_dev_infos.append(self._get_block_device_info(device))
return vg_dev_infos
def _get_block_devices(self):
# Extends original result of hu.get_device_info with hu.get_device_ids
# and add scsi param.
devices = hu.get_block_devices_from_udev_db()
block_dev_infos = []
for device in devices:
block_dev_infos.append(self._get_block_device_info(device))
return block_dev_infos
    def _get_block_device_info(self, device):
        """Return a dict describing *device*, with falsy values dropped."""
        device_info = {
            'name': device,
            'scsi': hu.scsi_address(device)
        }
        hu_device_info = hu.get_device_info(device)
        if hu_device_info:
            device_info.update(hu_device_info)
        ids = hu.get_device_ids(device)
        if not ids:
            # DEVLINKS not presented on virtual environment.
            # Let's keep it here for development purpose.
            devpath = device_info.get('uspec', {}).get('DEVPATH')
            if devpath:
                ids = [devpath]
        device_info['path'] = ids
        # NOTE(review): dict.iteritems() is py2-only; confirm target
        # runtime before porting to py3.
        return {k: v for k, v in device_info.iteritems() if v}

    def _get_grub(self):
        """Build a Grub object from deploy_data kernel parameters."""
        LOG.debug('--- Parse grub settings ---')
        grub = objects.Grub()
        kernel_params = self.data.get('deploy_data', {}).get(
            'kernel_params', '')
        # NOTE(lobur): Emulating ipappend 2 to allow early network-based
        # initialization during tenant image boot.
        bootif = utils.parse_kernel_cmdline().get("BOOTIF")
        if bootif:
            kernel_params += " BOOTIF=%s" % bootif
        if kernel_params:
            LOG.debug('Setting initial kernel parameters: %s',
                      kernel_params)
            grub.kernel_params = kernel_params
        return grub

    def _disk_dev(self, ks_disk):
        """Resolve a ks disk id to exactly one device name."""
        # first we try to find a device that matches ks_disk
        # comparing by-id and by-path links
        matched = [hu_disk['name'] for hu_disk in self.hu_disks
                   if self._match_device(hu_disk, ks_disk)]
        # if we can not find a device by its by-id and by-path links
        # NOTE(review): an ambiguous match (len > 1) raises the same
        # "not found" error as zero matches.
        if not matched or len(matched) > 1:
            raise errors.DiskNotFoundError(
                'Disk not found with %s: %s' % (
                    ks_disk['id']['type'], ks_disk['id']['value']))
        return matched[0]

    def _disk_vg_dev(self, ks_vgs):
        """Resolve a ks vg id to exactly one VG device name."""
        # first we try to find a device that matches ks_disk
        # comparing by-id and by-path links
        matched = [hu_vg['name'] for hu_vg in self.hu_vgs
                   if self._match_data_by_pattern(hu_vg, ks_vgs)]
        # if we can not find a device by its by-id and by-path links
        if not matched or len(matched) > 1:
            raise errors.DiskNotFoundError(
                'Disk not found with %s: %s' % (
                    ks_vgs['id']['type'], ks_vgs['id']['value']))
        return matched[0]

    def _get_device_ids(self, dev_type):
        """List udev id dicts for all disks or partitions on the node."""
        device_ids = []
        # NOTE(review): `devs` stays unbound for any dev_type other than
        # hu.DISK / hu.PARTITION — callers must pass one of those.
        if dev_type == hu.DISK:
            devs = hu.get_block_devices_from_udev_db()
        elif dev_type == hu.PARTITION:
            devs = hu.get_partitions_from_udev_db()
        for dev in devs:
            ids = hu.get_device_ids(dev)
            if ids:
                device_ids.append(ids)
        return device_ids

    @property
    def hu_partitions(self):
        # Scanned once and cached on the instance.
        if not getattr(self, '_hu_partitions', None):
            self._hu_partitions = self._get_device_ids(dev_type=hu.PARTITION)
        return self._hu_partitions

    def _disk_partition(self, ks_partition):
        """Resolve a ks partition id to exactly one partition name."""
        matched = [hu_partition['name'] for hu_partition in self.hu_partitions
                   if self._match_data_by_pattern(hu_partition, ks_partition)]
        if not matched or len(matched) > 1:
            raise errors.DiskNotFoundError(
                'Disk not found with %s: %s' % (
                    ks_partition['id']['type'], ks_partition['id']['value']))
        return matched[0]
def _match_data_by_pattern(self, hu_data, ks_data):
id_type = ks_data['id']['type']
id_value = ks_data['id']['value']
if isinstance(hu_data.get(id_type), (list, tuple)):
return any((fnmatch.fnmatch(value, id_value) for value in
hu_data.get(id_type, [])))
else:
return fnmatch.fnmatch(hu_data.get(id_type, ''), id_value)
def convert_size(data):
    """Normalize every size in *data* to an integer MiB value.

    First rewrites human-readable strings ('10 GB') via
    convert_string_sizes(), then resolves '%' / 'remaining'
    placeholders against the owning disk/vg via _resolve_all_sizes().
    """
    return _resolve_all_sizes(convert_string_sizes(data))
def _resolve_all_sizes(data):
    """Resolve percentage/'remaining' sizes for all disks and vgs.

    :param data: list of disk and vg space dicts (sizes in MiB or still
        expressed as '%'/'remaining' strings)
    :returns: new list with every size resolved
    """
    # NOTE(oberezovskyi): "disks" should be processed before "vgs"
    # List comprehensions instead of py2 filter(): on py3 filter()
    # returns a one-shot iterator, which _resolve_sizes() would exhaust
    # before _set_vg_sizes() iterates it again, and `disks + vgs` would
    # fail on filter objects.
    disks = [space for space in data if space['type'] == 'disk']
    disks = _resolve_sizes(disks)
    vgs = [space for space in data if space['type'] == 'vg']
    _set_vg_sizes(vgs, disks)
    vgs = _resolve_sizes(vgs, retain_space_size=False)
    return disks + vgs
def _set_vg_sizes(vgs, disks):
    """Derive each vg's size from the pv volumes allocated on the disks.

    Each pv contributes its size minus its lvm metadata overhead.
    """
    vg_sizes = defaultdict(int)
    for disk in disks:
        for vol in disk['volumes']:
            if vol['type'] == 'pv':
                vg_sizes[vol['vg']] += (vol['size'] -
                                        vol.get('lvm_meta_size',
                                                DEFAULT_LVM_META_SIZE))
    for vg in vgs:
        vg['size'] = vg_sizes[vg['id']]
def _convert_percentage_sizes(space, size):
for volume in space['volumes']:
if isinstance(volume['size'], basestring) and '%' in volume['size']:
# NOTE(lobur): decimal results of % conversion are floored.
volume['size'] = size * int(volume['size'].split('%')[0]) // 100
def _get_disk_id(disk):
if isinstance(disk['id'], dict):
return '{}: {}'.format(disk['id']['type'],
disk['id']['value'])
return disk['id']
def _get_space_size(space, retain_size):
if not space.get('size'):
raise ValueError('Size of {type} "{id}" is not '
'specified'.format(type=space['type'],
id=_get_disk_id(space)))
return space['size'] if retain_size else space.pop('size')
def _process_space_claims(space):
claimed_space = 0
unsized_volume = None
for volume in space['volumes']:
if (isinstance(volume['size'], basestring) and
volume['size'] == 'remaining'):
if not unsized_volume:
unsized_volume = volume
else:
raise ValueError('Detected multiple volumes attempting to '
'claim remaining size {type} "{id}"'
''.format(type=space['type'],
id=_get_disk_id(space)))
else:
claimed_space += volume['size']
return claimed_space, unsized_volume
def _resolve_sizes(spaces, retain_space_size=True):
    """Resolve '%' and 'remaining' volume sizes of every space, in place.

    :param spaces: list of disk/vg dicts
    :param retain_space_size: when False the space's own 'size' key is
        popped after use (vgs re-derive it later)
    :returns: the same list with all volume sizes resolved
    :raises ValueError: when volumes over-claim the available space, or
        'remaining' is requested but no space is left
    """
    for space in spaces:
        space_size = _get_space_size(space, retain_space_size)
        # NOTE(oberezovskyi): DEFAULT_GRUB_SIZE is size of grub stage 1.5
        # (bios_grub) partition
        taken_space = DEFAULT_GRUB_SIZE if space['type'] == 'disk' else 0
        _convert_percentage_sizes(space, space_size)
        claimed_space, unsized_volume = _process_space_claims(space)
        taken_space += claimed_space
        delta = space_size - taken_space
        if delta < 0:
            raise ValueError('Sum of requested filesystem sizes exceeds space '
                             'available on {type} "{id}" by {delta} '
                             'MiB'.format(delta=abs(delta), type=space['type'],
                                          id=_get_disk_id(space)))
        elif unsized_volume:
            # Hand whatever is left to the single 'remaining' volume.
            ref = (unsized_volume['mount'] if unsized_volume.get(
                'mount') else unsized_volume.get('pv'))
            if delta:
                LOG.info('Claiming remaining {delta} MiB for {ref} '
                         'volume/partition on {type} {id}.'
                         ''.format(delta=abs(delta),
                                   type=space['type'],
                                   id=_get_disk_id(space),
                                   ref=ref))
                unsized_volume['size'] = delta
            else:
                raise ValueError(
                    'Volume/partition {ref} requested all remaining space, '
                    'but no unclaimed space remains on {type} {id}'.format(
                        type=space['type'],
                        id=_get_disk_id(space),
                        ref=ref))
        else:
            LOG.info('{delta} MiB of unclaimed space remains on {type} "{id}" '
                     'after completing allocations.'.format(delta=abs(delta),
                                                            type=space['type'],
                                                            id=_get_disk_id(
                                                                space)))
    return spaces
def convert_string_sizes(data):
    """Recursively convert human-readable sizes to MiB ints, in place.

    Walks lists and dicts; values under 'size'/'lvm_meta_size' keys are
    converted with human2bytes() unless they are '%'/'remaining'
    placeholders (left for later resolution). Scalars pass through.
    """
    # py2/py3 compatible replacement for the py2-only ``basestring``.
    text_types = str if str is not bytes else basestring  # noqa: F821
    if isinstance(data, (list, tuple)):
        return [convert_string_sizes(el) for el in data]
    if isinstance(data, dict):
        for k, v in data.items():
            if (isinstance(v, text_types) and
                    any(x in v for x in ('%', 'remaining'))):
                continue
            if k in ('size', 'lvm_meta_size'):
                data[k] = human2bytes(v)
            else:
                data[k] = convert_string_sizes(v)
    return data
def human2bytes(value, default='MiB', target='MiB'):
    """Convert a human-readable size string to an int in *target* units.

    Accepts decimal units ('KB', 'MB', ... powers of 1000) and IEC
    units ('KiB', 'MiB', ... powers of 1024), with one optional space
    before the unit. A bare number is interpreted in *default* units.

    :raises ValueError: on any unparsable value or unknown unit
    """
    symbols = {'custom': ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'),
               'iec': ('KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB')}
    # Renamed from ``bytes`` to stop shadowing the builtin.
    unit_factors = {}
    unit_factors.update(
        {e: 1000.0 ** n for n, e in enumerate(symbols['custom'])})
    unit_factors.update(
        {e: 1024.0 ** n for n, e in enumerate(symbols['iec'], 1)})
    try:
        number = ''
        prefix = default
        for index, letter in enumerate(value):
            if letter and letter.isdigit() or letter == '.':
                number += letter
            else:
                # Allow a single space between the number and the unit.
                if value[index] == ' ':
                    index += 1
                prefix = value[index:]
                break
        return int(float(number) * unit_factors[prefix] / unit_factors[target])
    except Exception as ex:
        raise ValueError('Can\'t convert size %s. Error: %s' % (value, ex))

View File

@ -0,0 +1,198 @@
{
"uniqueItems": true,
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "array",
"title": "Ironic partition schema",
"minItems": 1,
"items": {
"anyOf": [
{
"required": [
"type",
"id",
"volumes",
"size"
],
"type": "object",
"properties": {
"name": {
"type": "string"
},
"volumes": {
"items": {
"anyOf": [
{
"required": [
"type",
"size",
"vg"
],
"type": "object",
"properties": {
"vg": {
"type": "string"
},
"type": {
"enum": [
"pv"
]
},
"lvm_meta_size": {
"type": "string"
},
"size": {
"type": "string"
}
}
},
{
"required": [
"type",
"size"
],
"type": "object",
"properties": {
"mount": {
"type": "string"
},
"type": {
"enum": [
"raid",
"partition"
]
},
"file_system": {
"type": "string"
},
"name": {
"type": "string"
},
"size": {
"type": "string"
}
}
},
{
"required": [
"type",
"size"
],
"type": "object",
"properties": {
"type": {
"enum": [
"boot"
]
},
"size": {
"type": "string"
}
}
},
{
"required": [
"type",
"size"
],
"type": "object",
"properties": {
"type": {
"enum": [
"lvm_meta_pool"
]
},
"size": {
"type": "string"
}
}
}
]
},
"type": "array"
},
"type": {
"enum": [
"disk"
]
},
"id": {
"required": [
"type",
"value"
],
"type": "object",
"properties": {
"type": {
"enum": [
"scsi",
"path",
"name"
]
}
}
},
"size": {
"type": "string"
}
}
},
{
"required": [
"type",
"id",
"volumes"
],
"type": "object",
"properties": {
"_allocate_size": {
"type": "string"
},
"label": {
"type": "string"
},
"min_size": {
"type": "integer"
},
"volumes": {
"items": {
"required": [
"type",
"size",
"name"
],
"type": "object",
"properties": {
"mount": {
"type": "string"
},
"type": {
"enum": [
"lv"
]
},
"name": {
"type": "string"
},
"file_system": {
"type": "string"
},
"size": {
"type": "string"
}
}
},
"type": "array"
},
"type": {
"enum": [
"vg"
]
},
"id": {
"type": "string"
}
}
}
]
}
}

View File

@ -0,0 +1,99 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Partition scheme",
"type": "array",
"minItems": 1,
"uniqueItems": true,
"items": {
"anyOf": [
{
"type": "object",
"required": ["type", "id", "volumes", "name",
"size", "extra", "free_space"],
"properties": {
"type": {"enum": ["disk"]},
"id": {"type": "string"},
"name": {"type": "string"},
"size": {"type": "integer"},
"free_space": {"type": "integer"},
"extra": {
"type": "array",
"items": {"type": "string"}
},
"volumes": {
"type": "array",
"items": {
"anyOf": [
{
"type": "object",
"required": ["type", "size",
"lvm_meta_size", "vg"],
"properties": {
"type": {"enum": ["pv"]},
"size": {"type": "integer"},
"lvm_meta_size": {"type": "integer"},
"vg": {"type": "string"}
}
},
{
"type": "object",
"required": ["type", "size"],
"properties": {
"type": {"enum": ["raid",
"partition"]},
"size": {"type": "integer"},
"mount": {"type": "string"},
"file_system": {"type": "string"},
"name": {"type": "string"}
}
},
{
"type": "object",
"required": ["type", "size"],
"properties": {
"type": {"enum": ["boot"]},
"size": {"type": "integer"}
}
},
{
"type": "object",
"required": ["type", "size"],
"properties": {
"type": {"enum": ["lvm_meta_pool"]},
"size": {"type": "integer"}
}
}
]
}
}
}
},
{
"type": "object",
"required": ["type", "id", "volumes"],
"properties": {
"type": {"enum": ["vg"]},
"id": {"type": "string"},
"label": {"type": "string"},
"min_size": {"type": "integer"},
"_allocate_size": {"type": "string"},
"volumes": {
"type": "array",
"items": {
"type": "object",
"required": ["type", "size", "name"],
"properties": {
"type": {"enum": ["lv"]},
"size": {"type": "integer"},
"name": {"type": "string"},
"mount": {"type": "string"},
"file_system": {"type": "string"}
}
}
}
}
}
]
}
}

View File

@ -0,0 +1,56 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import jsonschema
import os
from bareon import errors
from bareon.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def validate(data, schema_file='nailgun'):
    """Validate a partition scheme against a JSON schema.

    :param data: partition scheme (list of spaces) to validate
    :param schema_file: base name (without '.json') of the schema file in
                        the 'json_schemes' directory next to this module
    :raises: errors.WrongPartitionSchemeError if jsonschema validation
             fails or the scheme contains no disks
    """
    base_path = os.path.dirname(__file__)
    schemas_path = os.path.join(base_path, 'json_schemes')
    # Renamed handle from 'file' to avoid shadowing the builtin.
    with open(os.path.join(schemas_path, '%s.json' % schema_file)) as f:
        schema = json.load(f)

    try:
        checker = jsonschema.FormatChecker()
        jsonschema.validate(data, schema,
                            format_checker=checker)
    except Exception as exc:
        LOG.exception(exc)
        raise errors.WrongPartitionSchemeError(str(exc))

    # scheme is not valid if the number of disks is 0
    if not [d for d in data if d['type'] == 'disk']:
        raise errors.WrongPartitionSchemeError(
            'Partition scheme seems empty')

    # TODO(lobur): Must be done after unit conversion
    # for space in data:
    #     for volume in space.get('volumes', []):
    #         if volume['size'] > 16777216 and volume.get('mount') == '/':
    #             raise errors.WrongPartitionSchemeError(
    #                 'Root file system must be less than 16T')

    # TODO(kozhukalov): need to have additional logical verifications
    # maybe sizes and format of string values

View File

@ -23,24 +23,25 @@ from six.moves.urllib.parse import urlparse
from six.moves.urllib.parse import urlsplit
import yaml
from bareon.drivers.base import BaseDataDriver
from bareon.drivers.base import ConfigDriveDataDriverMixin
from bareon.drivers.base import GrubBootloaderDataDriverMixin
from bareon.drivers.base import PartitioningDataDriverMixin
from bareon.drivers.base import ProvisioningDataDriverMixin
from bareon.drivers import ks_spaces_validator
from bareon import errors
from bareon import objects
from bareon.openstack.common import log as logging
from bareon.utils import hardware as hu
from bareon.utils import utils
from bareon.drivers.data.base import BaseDataDriver
from bareon.drivers.data.base import ConfigDriveDataDriverMixin
from bareon.drivers.data.base import GrubBootloaderDataDriverMixin
from bareon.drivers.data.base import PartitioningDataDriverMixin
from bareon.drivers.data.base import ProvisioningDataDriverMixin
from bareon.drivers.data import ks_spaces_validator
from bareon import errors
from bareon import objects
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('prepare_configdrive', 'bareon.manager')
CONF.import_opt('config_drive_path', 'bareon.manager')
CONF.import_opt('prepare_configdrive', 'bareon.drivers.deploy.nailgun')
CONF.import_opt('config_drive_path', 'bareon.drivers.deploy.nailgun')
def match_device(hu_disk, ks_disk):
@ -108,10 +109,18 @@ class Nailgun(BaseDataDriver,
def partition_scheme(self):
return self._partition_scheme
@property
def hw_partition_scheme(self):
return None
@property
def image_scheme(self):
return self._image_scheme
@property
def image_meta(self):
return None
@property
def grub(self):
return self._grub
@ -443,10 +452,11 @@ class Nailgun(BaseDataDriver,
disk['name'])
parted.add_partition(size=20, configdrive=True)
# checking if /boot is created
if not self._boot_partition_done or not self._boot_done:
raise errors.WrongPartitionSchemeError(
'/boot partition has not been created for some reasons')
# TODO(lobur): port https://review.openstack.org/#/c/261562/ to fix
# # checking if /boot is created
# if not self._boot_partition_done or not self._boot_done:
# raise errors.WrongPartitionSchemeError(
# '/boot partition has not been created for some reasons')
LOG.debug('Looping over all volume groups in provision data')
for vg in self.ks_vgs:
@ -616,33 +626,11 @@ class Nailgun(BaseDataDriver,
return image_scheme
class Ironic(Nailgun):
def __init__(self, data):
super(Ironic, self).__init__(data)
def parse_configdrive_scheme(self):
pass
def parse_partition_scheme(self):
# FIXME(yuriyz): Using of internal attributes of base class is very
# fragile. This code acts only as temporary solution. Ironic should
# use own driver, based on simple driver.
self._boot_partition_done = True
self._boot_done = True
return super(Ironic, self).parse_partition_scheme()
class NailgunBuildImage(BaseDataDriver,
ProvisioningDataDriverMixin,
ConfigDriveDataDriverMixin,
GrubBootloaderDataDriverMixin):
# TODO(kozhukalov):
# This list of packages is used by default only if another
# list isn't given in build image data. In the future
# we need to handle package list in nailgun. Even more,
# in the future, we'll be building not only ubuntu images
# and we'll likely move this list into some kind of config.
DEFAULT_TRUSTY_PACKAGES = [
"acl",
"anacron",
@ -688,6 +676,12 @@ class NailgunBuildImage(BaseDataDriver,
"vlan",
]
# TODO(kozhukalov):
# This list of packages is used by default only if another
# list isn't given in build image data. In the future
# we need to handle package list in nailgun. Even more,
# in the future, we'll be building not only ubuntu images
# and we'll likely move this list into some kind of config.
def __init__(self, data):
super(NailgunBuildImage, self).__init__(data)
self._image_scheme = objects.ImageScheme()
@ -696,6 +690,9 @@ class NailgunBuildImage(BaseDataDriver,
self.parse_schemes()
self._operating_system = self.parse_operating_system()
def image_meta(self):
pass
@property
def partition_scheme(self):
return self._partition_scheme

View File

@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from bareon.drivers import nailgun
from bareon.drivers.data import nailgun
from bareon import objects

View File

View File

@ -0,0 +1,54 @@
#
# Copyright 2016 Cray Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class BaseDeployDriver(object):
    """Deploy driver API.

    A deploy driver performs the actual node provisioning steps on top of
    a data driver, which supplies the parsed deployment configuration
    (partition scheme, image scheme, etc.).
    """

    def __init__(self, data_driver):
        # Data driver instance providing the deployment configuration.
        self.driver = data_driver

    @abc.abstractmethod
    def do_partitioning(self):
        """Partitions storage devices"""

    @abc.abstractmethod
    def do_configdrive(self):
        """Adds configdrive"""

    @abc.abstractmethod
    def do_copyimage(self):
        """Provisions tenant image"""

    @abc.abstractmethod
    def do_reboot(self):
        """Reboots node"""

    @abc.abstractmethod
    def do_provisioning(self):
        """Provisions node"""

    @abc.abstractmethod
    def do_multiboot_bootloader(self):
        """Install MultiBoot Bootloader"""

    @abc.abstractmethod
    def do_install_os(self, os_id):
        """Install the OS identified by os_id onto its target partitions.

        Implementations copy the image, install the bootloader (for
        single-boot deployments) and generate fstab for that OS.
        """

View File

@ -0,0 +1,606 @@
#
# Copyright 2016 Cray Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
from contextlib import contextmanager
from oslo_config import cfg
from bareon.drivers.deploy.base import BaseDeployDriver
from bareon import errors
from bareon.openstack.common import log as logging
from bareon.utils import fs as fu
from bareon.utils import grub as gu
from bareon.utils import lvm as lu
from bareon.utils import md as mu
from bareon.utils import partition as pu
from bareon.utils import utils
# Configuration options controlling udev rules handling during
# repartitioning (rules are temporarily blacklisted so udev does not
# interfere with partition creation) and the GRUB menu timeout.
opts = [
    cfg.StrOpt(
        'udev_rules_dir',
        default='/etc/udev/rules.d',
        help='Path where to store actual rules for udev daemon',
    ),
    cfg.StrOpt(
        'udev_rules_lib_dir',
        default='/lib/udev/rules.d',
        help='Path where to store default rules for udev daemon',
    ),
    cfg.StrOpt(
        'udev_rename_substr',
        default='.renamedrule',
        help='Substring to which file extension .rules be renamed',
    ),
    cfg.StrOpt(
        'udev_empty_rule',
        default='empty_rule',
        help='Correct empty rule for udev daemon',
    ),
    cfg.IntOpt(
        'grub_timeout',
        default=5,
        help='Timeout in secs for GRUB'
    ),
]

CONF = cfg.CONF
CONF.register_opts(opts)

LOG = logging.getLogger(__name__)

# TODO(lobur): This driver mostly copies nailgun driver. Need to merge them.
class GenericDeployDriver(BaseDeployDriver):
    """Generic multi-image deploy driver.

    Implements partitioning (via PolicyPartitioner), fstab generation,
    GRUB 1/2 bootloader installation (single and multiboot) and target
    file-system mounting.  Image transfer (do_copyimage) is left to
    transport-specific subclasses such as the rsync and swift drivers.
    """

    def do_reboot(self):
        """Reboot the node."""
        LOG.debug('--- Rebooting node (do_reboot) ---')
        utils.execute('reboot')

    def do_provisioning(self):
        """Full provisioning flow: partition, configdrive, install OSes."""
        LOG.debug('--- Provisioning (do_provisioning) ---')
        self.do_partitioning()
        self.do_configdrive()
        # NOTE: explicit loop instead of map() so the side effects also
        # happen on Python 3, where map() is lazy.
        for os_id in self.driver.get_os_ids():
            self.do_install_os(os_id)
        if self.driver.is_multiboot:
            self.do_multiboot_bootloader()
        LOG.debug('--- Provisioning END (do_provisioning) ---')

    def do_partitioning(self):
        """Partition the disks according to the configured policy."""
        LOG.debug('--- Partitioning disks (do_partitioning) ---')
        PolicyPartitioner(self.driver).partition()
        LOG.debug('--- Partitioning disks END (do_partitioning) ---')

    def do_configdrive(self):
        """Build the configdrive via the data driver."""
        self.driver.create_configdrive()

    def do_copyimage(self, os_id):
        """Transfer the image(s) of *os_id* to the node.

        Must be overridden by a transport-specific subclass.  Accepts
        os_id so the signature matches how do_install_os invokes it
        (the original no-arg stub raised TypeError instead of
        NotImplementedError when not overridden).
        """
        raise NotImplementedError

    def do_install_os(self, os_id):
        """Copy the OS image, then install bootloader and generate fstab."""
        self.do_copyimage(os_id)
        os_dir = '/tmp/target'
        with self.mount_target(os_dir, os_id, treat_mtab=False):
            if not self.driver.is_multiboot:
                self.do_singleboot_bootloader(os_dir, os_id)
            self.do_generate_fstab(os_dir, os_id)

    def do_generate_fstab(self, os_path, os_id):
        """Write /etc/fstab for *os_id* under the mounted tree *os_path*.

        File systems with fstab_enabled=False are written commented out.
        Skipped (with a log message) when the image has no /etc directory.
        """
        mount2uuid = self._mount2uuid(os_id, check_root=False)
        if not os.path.exists(os.path.join(os_path, 'etc')):
            LOG.info('Can\'t create fstab for {} image'.format(os_id))
            return
        with open(os.path.join(os_path, 'etc/fstab'), 'wb') as f:
            for fs in self.driver.partition_scheme.fs_by_os_id(os_id):
                f.write('{enabled}UUID={uuid} {mount} {fs} {options} '
                        '0 0\n'.format(enabled='' if fs.fstab_enabled else '#',
                                       uuid=mount2uuid[fs.mount],
                                       mount=fs.mount,
                                       fs=fs.type,
                                       options=fs.fstab_options))

    def do_multiboot_bootloader(self):
        """Install the common (multiboot) grub2 to the dedicated partition."""
        install_devices = [d.name for d in self.driver.partition_scheme.parteds
                           if d.install_bootloader]
        mount_dir = '/tmp/target'
        with self._mount_bootloader(mount_dir) as uuid:
            gu.grub2_install(install_devices, boot_root=mount_dir)
            self._generate_boot_info(mount_dir, uuid)

    def _mount2uuid(self, os_id, check_root=True):
        """Map mount points of *os_id* to device UUIDs.

        :raises: errors.WrongPartitionSchemeError if check_root is True
                 and no '/' mount point exists
        """
        mount2uuid = {}
        for fs in self.driver.partition_scheme.fs_by_os_id(os_id):
            mount2uuid[fs.mount] = pu.get_uuid(fs.device)

        if check_root and '/' not in mount2uuid:
            raise errors.WrongPartitionSchemeError(
                'Error: device with / mountpoint has not been found')
        return mount2uuid

    def _uuid2osid(self, check_root=True):
        """Map root-device UUIDs to os ids (OSes without '/' are skipped)."""
        uuid2image = {}
        for os_id in self.driver.get_os_ids():
            mount2uuid = self._mount2uuid(os_id, check_root=check_root)
            uuid = mount2uuid.get('/', '')
            if uuid:
                uuid2image[uuid] = os_id
        return uuid2image

    def _get_multiboot_boot_image(self):
        """Return the image flagged os_boot, or None."""
        return next((image for image in self.driver.image_scheme.images if
                     image.os_boot), None)

    def do_singleboot_bootloader(self, chroot, os_id):
        """Install the bootloader for *os_id* into its chroot.

        Detects the grub version present in the image and dispatches to
        the matching installer; falls back to the bundled grub2 when
        detection fails (version -1).
        """
        grub = self.driver.grub
        try:
            guessed_version = gu.guess_grub_version(chroot=chroot)
        except errors.GrubUtilsError as ex:
            LOG.warning('Grub detection failed. Error: {}'.format(ex))
            guessed_version = -1
        if guessed_version != grub.version:
            grub.version = guessed_version
            LOG.warning('Grub version differs from which the operating system '
                        'should have by default. Found version in image: '
                        '{0}'.format(guessed_version))

        if grub.version == 1 and self.driver.is_multiboot:
            LOG.warning('Grub1 is being used in a multiboot deployment, '
                        'thus it is not guaranteed that image name "{}" will '
                        'be discovered by os-prober and appear in the common '
                        'grub.cfg'.format(os_id))

        install_devices = [d.name for d in self.driver.partition_scheme.parteds
                           if d.install_bootloader]

        if grub.version == 1:
            mount2uuid = self._mount2uuid(os_id)
            grub.append_kernel_params('root=UUID=%s ' % mount2uuid['/'])

        GRUB_INSTALLERS = {1: self._do_bootloader_grub1,
                           2: self._do_bootloader_grub2,
                           -1: self._do_bootloader_grub2_bundled}
        GRUB_INSTALLERS[grub.version](grub, chroot, install_devices,
                                      self.driver.boot_on_lvm)

    def _do_bootloader_grub1(self, grub, chroot, install_devices,
                             lvm_boot=False):
        if lvm_boot:
            raise NotImplementedError("Grub 1 does not support boot from LVM.")
        # TODO(kozhukalov): implement which kernel to use by default
        # Currently only grub1_cfg accepts kernel and initrd parameters.
        boot_device = self.driver.partition_scheme.boot_device(grub.version)
        kernel = grub.kernel_name or gu.guess_kernel(chroot=chroot,
                                                     regexp=grub.kernel_regexp)
        initrd = grub.initrd_name or gu.guess_initrd(chroot=chroot,
                                                     regexp=grub.initrd_regexp)
        gu.grub1_cfg(kernel=kernel, initrd=initrd,
                     kernel_params=grub.kernel_params, chroot=chroot,
                     grub_timeout=CONF.grub_timeout)
        gu.grub1_install(install_devices, boot_device, chroot=chroot)

    def _do_bootloader_grub2(self, grub, chroot, install_devices,
                             lvm_boot=False):
        # Try the image's own grub2 first; fall back to the bundled one.
        try:
            gu.grub2_cfg(kernel_params=grub.kernel_params, chroot=chroot,
                         grub_timeout=CONF.grub_timeout, lvm_boot=lvm_boot)
            gu.grub2_install(install_devices, chroot=chroot,
                             lvm_boot=lvm_boot)
        except errors.ProcessExecutionError as ex:
            LOG.warning('Tenant grub2 install failed. Error: {}'.format(ex))
            LOG.warning('Trying to install using bundled grub2')
            self._do_bootloader_grub2_bundled(grub, chroot, install_devices,
                                              lvm_boot=lvm_boot)

    def _do_bootloader_grub2_bundled(self, grub, chroot, install_devices,
                                     lvm_boot=False):
        gu.grub2_install(install_devices, boot_root=chroot, lvm_boot=lvm_boot)
        gu.grub2_cfg_bundled(kernel_params=grub.kernel_params,
                             chroot=chroot, grub_timeout=CONF.grub_timeout,
                             lvm_boot=lvm_boot)

    def _generate_boot_info(self, chroot, uuid=None):
        """Build the multiboot grub.cfg from os-prober output.

        Writes the menu to <chroot>/boot/grub2/grub.cfg and dumps the
        discovered boot elements to /tmp/boot_entries.json.
        """
        def list_of_seq_unique_by_key(seq, key):
            seen = set()
            seen_add = seen.add
            return [x for x in seq if
                    x[key] not in seen and not seen_add(x[key])]

        regex = re.compile('menuentry \'(?P<name>[^\']+)\'.*?search '
                           '.*?(?P<uuid>[0-9a-f\-]{36}).*?linux(?:16)? '
                           '(?P<kernel>.*?) .*?initrd(?:16)? '
                           '(?P<initrd>[^\n]*)', re.M | re.DOTALL)

        entries = '''
set timeout=1
insmod part_gpt
insmod ext2
'''
        boot_entry = '''
menuentry '{name}'{{
search --no-floppy --fs-uuid --set=root {uuid}
linux {kernel} root=UUID={uuid} ro {kernel_params}
initrd {initrd}
}}
'''
        boot_elements = []
        os.environ['GRUB_DISABLE_SUBMENU'] = 'y'
        os_prober_entries = utils.execute('/etc/grub.d/30_os-prober')[0]

        uuid2osid = self._uuid2osid(check_root=False)
        for index, element in enumerate(re.finditer(regex, os_prober_entries)):
            os_id = uuid2osid.get(element.group('uuid'), '')
            if not os_id:
                # Entry does not belong to any deployed OS; skip it.
                continue

            image = self.driver.image_scheme.get_os_root(os_id)

            entries += boot_entry.format(**{
                'name': element.group('name'),
                'uuid': element.group('uuid'),
                'kernel': element.group('kernel'),
                'initrd': element.group('initrd'),
                'kernel_params': self.driver.data.get('deploy_data', {}).get(
                    'kernel_params', '')
            })

            boot_elements.append({
                'boot_name': element.group('name'),
                'root_uuid': element.group('uuid'),
                'os_id': os_id,
                'image_name': image.image_name,
                'image_uuid': image.image_uuid,
                'grub_id': index,
            })

        boot_elements = list_of_seq_unique_by_key(boot_elements, 'root_uuid')

        # Default to the first entry; only look up the boot image's entry
        # when one is flagged.  (Previously boot_id/root_uuid were unbound
        # when no image had os_boot set, causing a NameError.)
        boot_id = 0
        boot_image = self._get_multiboot_boot_image()
        if boot_image:
            root_uuid = self._mount2uuid(boot_image.os_id)['/']
            boot_id = next((element['grub_id'] for element in boot_elements
                            if element['root_uuid'] == root_uuid), 0)
        entries += 'set default={}'.format(boot_id)
        with open(os.path.join(chroot, 'boot', 'grub2', 'grub.cfg'),
                  'w') as conf:
            conf.write(entries)

        result = {'elements': boot_elements,
                  'multiboot_partition': uuid,
                  'current_element': boot_id}
        with open('/tmp/boot_entries.json', 'w') as boot_entries_file:
            json.dump(result, boot_entries_file)

    def _mount_target(self, mount_dir, os_id, pseudo=True, treat_mtab=True):
        """Mount all file systems of *os_id* under *mount_dir*.

        :param pseudo: also bind-mount /sys, /dev and /proc
        :param treat_mtab: regenerate etc/mtab inside the target
        """
        LOG.debug('Mounting target file systems: %s', mount_dir)
        # Here we are going to mount all file systems in partition schema.
        for fs in self.driver.partition_scheme.fs_sorted_by_depth(os_id):
            if fs.mount == 'swap':
                continue
            mount = os.path.join(mount_dir, fs.mount.strip(os.sep))
            utils.makedirs_if_not_exists(mount)
            fu.mount_fs(fs.type, str(fs.device), mount)

        if pseudo:
            for path in ('/sys', '/dev', '/proc'):
                utils.makedirs_if_not_exists(
                    os.path.join(mount_dir, path.strip(os.sep)))
                fu.mount_bind(mount_dir, path)

        if treat_mtab:
            mtab = utils.execute('chroot', mount_dir, 'grep', '-v', 'rootfs',
                                 '/proc/mounts')[0]
            mtab_path = os.path.join(mount_dir, 'etc/mtab')
            if os.path.islink(mtab_path):
                os.remove(mtab_path)
            with open(mtab_path, 'wb') as f:
                f.write(mtab)

    def _umount_target(self, mount_dir, os_id, pseudo=True):
        """Unmount the target tree in reverse depth order."""
        LOG.debug('Umounting target file systems: %s', mount_dir)
        if pseudo:
            for path in ('/proc', '/dev', '/sys'):
                fu.umount_fs(os.path.join(mount_dir, path.strip(os.sep)),
                             try_lazy_umount=True)
        for fs in self.driver.partition_scheme.fs_sorted_by_depth(os_id,
                                                                  True):
            if fs.mount == 'swap':
                continue
            fu.umount_fs(os.path.join(mount_dir, fs.mount.strip(os.sep)))

    @contextmanager
    def mount_target(self, mount_dir, os_id, pseudo=True, treat_mtab=True):
        """Context manager mounting *os_id* under *mount_dir*.

        Guarantees unmounting even when the body raises.
        """
        self._mount_target(mount_dir, os_id, pseudo=pseudo,
                           treat_mtab=treat_mtab)
        try:
            yield
        finally:
            self._umount_target(mount_dir, os_id, pseudo)

    @contextmanager
    def _mount_bootloader(self, mount_dir):
        """Mount the 'multiboot' partition and yield its UUID.

        :raises: errors.WrongPartitionSchemeError if zero or more than one
                 multiboot partitions exist
        """
        fs = [fss for fss in self.driver.partition_scheme.fss
              if fss.mount == 'multiboot']
        if len(fs) > 1:
            raise errors.WrongPartitionSchemeError(
                'Multiple multiboot partitions found')
        if not fs:
            # Previously this fell through to an IndexError below.
            raise errors.WrongPartitionSchemeError(
                'No multiboot partition found')

        utils.makedirs_if_not_exists(mount_dir)
        fu.mount_fs(fs[0].type, str(fs[0].device), mount_dir)
        try:
            # Ensure the partition is unmounted even if the caller raises.
            yield pu.get_uuid(fs[0].device)
        finally:
            fu.umount_fs(mount_dir)
class PolicyPartitioner(object):
    """Applies the configured partitions policy to the node's disks.

    Policies:
      * verify         -- assert the requested layout matches the hardware
                          and only (re)create file systems;
      * clean          -- wipe the disks and partition from scratch;
      * nailgun_legacy -- original nailgun behavior: skip partitioning
                          when any file system carries the keep_data flag.
    """

    def __init__(self, driver):
        # Data driver supplying partition_scheme, hw_partition_scheme, etc.
        self.driver = driver

    def partition(self):
        """Dispatch to the handler of the driver's partitions policy.

        :raises: errors.WrongPartitionPolicyError for an unknown policy
        """
        policy = self.driver.partitions_policy
        LOG.debug("Using partitioning policy '%s'."
                  % self.driver.partitions_policy)

        policy_handlers = {
            "verify": self._handle_verify,
            "clean": self._handle_clean,
            "nailgun_legacy": self._handle_nailgun_legacy,
        }

        known_policies = policy_handlers.keys()
        if policy not in known_policies:
            raise errors.WrongPartitionPolicyError(
                "'%s' policy is not one of known ones: %s"
                % (policy, known_policies))

        policy_handlers[policy]()

    def _handle_verify(self):
        # Compare the user-requested layout against the actual on-disk
        # layout; any mismatch aborts the deployment.
        provision_schema = self.driver.partition_scheme.to_dict()
        hw_schema = self.driver.hw_partition_scheme.to_dict()
        PartitionSchemaCompareTool().assert_no_diff(provision_schema,
                                                    hw_schema)
        self._do_clean_filesystems()

    @staticmethod
    def _verify_disk_size(parteds, hu_disks):
        """Check every scheme disk fits the matching physical disk.

        :raises: errors.DiskNotFoundError when a disk is missing,
                 errors.NotEnoughSpaceError when the scheme exceeds it
        """
        for parted in parteds:
            disks = [d for d in hu_disks if d.get('name') == parted.name]
            if not disks:
                raise errors.DiskNotFoundError(
                    'No physical disks found matching: %s' % parted.name)
            disk_size_bytes = disks[0].get('bspec', {}).get('size64')
            if not disk_size_bytes:
                raise ValueError('Cannot read size of the disk: %s'
                                 % disks[0].get('name'))
            # It's safer to understate the physical disk size
            disk_size_mib = utils.B2MiB(disk_size_bytes, ceil=False)
            if parted.size > disk_size_mib:
                raise errors.NotEnoughSpaceError(
                    'Partition scheme for: %(disk)s exceeds the size of the '
                    'disk. Scheme size is %(scheme_size)s MiB, and disk size '
                    'is %(disk_size)s MiB.' % {
                        'disk': parted.name, 'scheme_size': parted.size,
                        'disk_size': disk_size_mib})

    def _handle_clean(self):
        self._verify_disk_size(self.driver.partition_scheme.parteds,
                               self.driver.hu_disks)
        self._do_partitioning()

    def _handle_nailgun_legacy(self):
        # Corresponds to nailgun behavior.
        if self.driver.partition_scheme.skip_partitioning:
            LOG.debug('Some of fs has keep_data (preserve) flag, '
                      'skipping partitioning')
            self._do_clean_filesystems()
        else:
            # Fixed missing space between the concatenated string parts
            # (previously logged as "...wiping out alldisks...").
            LOG.debug('No keep_data (preserve) flag passed, wiping out all '
                      'disks and re-partitioning')
            self._do_partitioning()

    def _do_clean_filesystems(self):
        # NOTE(agordeev): it turns out that only mkfs.xfs needs '-f' flag in
        # order to force recreation of filesystem.
        # This option will be added to mkfs.xfs call explicitly in fs utils.
        # TODO(asvechnikov): need to refactor processing keep_flag logic when
        # data model will become flat
        for fs in self.driver.partition_scheme.fss:
            if not fs.keep_data:
                fu.make_fs(fs.type, fs.options, fs.label, fs.device)

    def _do_partitioning(self):
        """Wipe the disks and create partitions, RAIDs, LVM and file systems.

        The sequence matters: metadata is cleaned before and after
        partition creation, and udev rules are blacklisted for the
        duration of the partitioning to keep udev from grabbing devices.
        """
        # If disks are not wiped out at all, it is likely they contain lvm
        # and md metadata which will prevent re-creating a partition table
        # with 'device is busy' error.
        mu.mdclean_all()
        lu.lvremove_all()
        lu.vgremove_all()
        lu.pvremove_all()

        LOG.debug("Enabling udev's rules blacklisting")
        utils.blacklist_udev_rules(udev_rules_dir=CONF.udev_rules_dir,
                                   udev_rules_lib_dir=CONF.udev_rules_lib_dir,
                                   udev_rename_substr=CONF.udev_rename_substr,
                                   udev_empty_rule=CONF.udev_empty_rule)

        for parted in self.driver.partition_scheme.parteds:
            for prt in parted.partitions:
                # We wipe out the beginning of every new partition
                # right after creating it. It allows us to avoid possible
                # interactive dialog if some data (metadata or file system)
                # present on this new partition and it also allows udev not
                # hanging trying to parse this data.
                utils.execute('dd', 'if=/dev/zero', 'bs=1M',
                              'seek=%s' % max(prt.begin - 3, 0), 'count=5',
                              'of=%s' % prt.device, check_exit_code=[0])
                # Also wipe out the ending of every new partition.
                # Different versions of md stores metadata in different places.
                # Adding exit code 1 to be accepted as for handling situation
                # when 'no space left on device' occurs.
                utils.execute('dd', 'if=/dev/zero', 'bs=1M',
                              'seek=%s' % max(prt.end - 3, 0), 'count=5',
                              'of=%s' % prt.device, check_exit_code=[0, 1])

        for parted in self.driver.partition_scheme.parteds:
            pu.make_label(parted.name, parted.label)
            for prt in parted.partitions:
                pu.make_partition(prt.device, prt.begin, prt.end, prt.type)
                for flag in prt.flags:
                    pu.set_partition_flag(prt.device, prt.count, flag)
                if prt.guid:
                    pu.set_gpt_type(prt.device, prt.count, prt.guid)
                # If any partition to be created doesn't exist it's an error.
                # Probably it's again 'device or resource busy' issue.
                if not os.path.exists(prt.name):
                    raise errors.PartitionNotFoundError(
                        'Partition %s not found after creation' % prt.name)

        LOG.debug("Disabling udev's rules blacklisting")
        utils.unblacklist_udev_rules(
            udev_rules_dir=CONF.udev_rules_dir,
            udev_rename_substr=CONF.udev_rename_substr)

        # If one creates partitions with the same boundaries as last time,
        # there might be md and lvm metadata on those partitions. To prevent
        # failing of creating md and lvm devices we need to make sure
        # unused metadata are wiped out.
        mu.mdclean_all()
        lu.lvremove_all()
        lu.vgremove_all()
        lu.pvremove_all()

        # creating meta disks
        for md in self.driver.partition_scheme.mds:
            mu.mdcreate(md.name, md.level, md.devices, md.metadata)

        # creating physical volumes
        for pv in self.driver.partition_scheme.pvs:
            lu.pvcreate(pv.name, metadatasize=pv.metadatasize,
                        metadatacopies=pv.metadatacopies)

        # creating volume groups
        for vg in self.driver.partition_scheme.vgs:
            lu.vgcreate(vg.name, *vg.pvnames)

        # creating logical volumes
        for lv in self.driver.partition_scheme.lvs:
            lu.lvcreate(lv.vgname, lv.name, lv.size)

        # making file systems
        for fs in self.driver.partition_scheme.fss:
            found_images = [img for img in self.driver.image_scheme.images
                            if img.target_device == fs.device]
            if not found_images:
                fu.make_fs(fs.type, fs.options, fs.label, fs.device)
class PartitionSchemaCompareTool(object):
    """Compares a user-requested partition schema with the hardware one.

    Both schemas are normalized (flags cleared, irrelevant disks/file
    systems dropped, fs types translated, sizes removed) before a plain
    equality check.  Note: both input dicts are modified in place.
    """

    def assert_no_diff(self, user_schema, hw_schema):
        """Raise unless the two normalized schemas are equal.

        :raises: errors.PartitionSchemeMismatchError with a diff string
        """
        usr_sch = self._prepare_user_schema(user_schema, hw_schema)
        hw_sch = self._prepare_hw_schema(user_schema, hw_schema)
        # NOTE(lobur): this may not work on bm hardware: because of the
        # partition alignments sizes may not match precisely, so need to
        # write own diff tool
        if usr_sch != hw_sch:
            diff_str = utils.dict_diff(usr_sch, hw_sch,
                                       "user_schema", "hw_schema")
            raise errors.PartitionSchemeMismatchError(diff_str)
        LOG.debug("hw_schema and user_schema matched")

    def _prepare_user_schema(self, user_schema, hw_schema):
        LOG.debug('Preparing user_schema for verification:\n%s' %
                  user_schema)
        # Set all keep_data (preserve) flags to false.
        # They are just instructions to deploy driver and do not stored on
        # resulting partitions, so we have no means to read them from
        # hw_schema
        for fs in user_schema['fss']:
            fs['keep_data'] = False
            fs['os_id'] = []
        for parted in user_schema['parteds']:
            for part in parted['partitions']:
                part['keep_data'] = False

        self._drop_schema_size(user_schema)

        LOG.debug('Prepared user_schema is:\n%s' % user_schema)
        return user_schema

    @staticmethod
    def _drop_schema_size(schema):
        # If it exists, drop the schema size attribute. This should
        # be valid because doing a full layout comparison implicitly
        # involves verification of the disk sizes, and we don't need
        # to check those separately.
        for parted in schema['parteds']:
            parted.pop('size', None)

    def _prepare_hw_schema(self, user_schema, hw_schema):
        LOG.debug('Preparing hw_schema to verification:\n%s' %
                  hw_schema)

        user_disks = [p['name'] for p in user_schema['parteds']]

        # Ignore disks which are not mentioned in user_schema
        filtered_disks = []
        for disk in hw_schema['parteds']:
            if disk['name'] in user_disks:
                filtered_disks.append(disk)
            else:
                LOG.info("Node disk '%s' is not mentioned in deploy_config"
                         " thus it will be skipped." % disk['name'])
        hw_schema['parteds'] = filtered_disks

        # Ignore filesystems that belong to disk not mentioned in user_schema
        filtered_fss = []
        for fs in hw_schema['fss']:
            if fs['device'].rstrip("0123456789") in user_disks:
                filtered_fss.append(fs)
            else:
                LOG.info("Node filesystem '%s' belongs to disk not mentioned"
                         " in deploy_config thus it will be skipped."
                         % fs['device'])
        hw_schema['fss'] = filtered_fss

        # Transform filesystem types
        for fs in hw_schema['fss']:
            fs['fs_type'] = self._transform_fs_type(fs['fs_type'])
            fs['os_id'] = []

        self._drop_schema_size(hw_schema)

        LOG.debug('Prepared hw_schema is:\n%s' % hw_schema)
        return hw_schema

    def _transform_fs_type(self, hw_fs_type):
        """Translate a hardware-reported fs type to the user-schema name."""
        # hw fstype name pattern -> fstype name in user schema
        hw_fs_to_user_fs_map = {
            'linux-swap': 'swap'
        }
        # NOTE: .items() instead of py2-only .iteritems() -- equivalent
        # here and portable to Python 3.
        for hw_fs_pattern, usr_schema_val in hw_fs_to_user_fs_map.items():
            if hw_fs_pattern in hw_fs_type:
                LOG.info("Node fs type '%s' is transformed to the user "
                         "schema type as '%s'."
                         % (hw_fs_type, usr_schema_val))
                return usr_schema_val
        return hw_fs_type

View File

@ -21,6 +21,7 @@ from oslo_config import cfg
import six
import yaml
from bareon.drivers.deploy.base import BaseDeployDriver
from bareon import errors
from bareon.openstack.common import log as logging
from bareon.utils import artifact as au
@ -142,29 +143,13 @@ opts = [
),
]
cli_opts = [
cfg.StrOpt(
'data_driver',
default='nailgun',
help='Data driver'
),
cfg.StrOpt(
'image_build_dir',
default='/tmp',
help='Directory where the image is supposed to be built',
),
]
CONF = cfg.CONF
CONF.register_opts(opts)
CONF.register_cli_opts(cli_opts)
LOG = logging.getLogger(__name__)
class Manager(object):
def __init__(self, data):
self.driver = utils.get_driver(CONF.data_driver)(data)
class Manager(BaseDeployDriver):
def do_clean_filesystems(self):
# NOTE(agordeev): it turns out that only mkfs.xfs needs '-f' flag in
@ -303,7 +288,8 @@ class Manager(object):
)
utils.execute(
'write-mime-multipart', '--output=%s' % ud_output_path,
'write-mime-multipart',
'--output=%s' % ud_output_path,
'%s:text/cloud-boothook' % bh_output_path,
'%s:text/cloud-config' % cc_output_path)
utils.execute('genisoimage', '-output', CONF.config_drive_path,
@ -772,6 +758,12 @@ class Manager(object):
LOG.debug('--- Rebooting node (do_reboot) ---')
utils.execute('reboot')
def do_multiboot_bootloader(self):
pass
def do_install_os(self):
pass
def do_provisioning(self):
LOG.debug('--- Provisioning (do_provisioning) ---')
self.do_partitioning()
@ -825,7 +817,7 @@ class Manager(object):
LOG.debug('Post-install OS configuration')
if hasattr(bs_scheme, 'extra_files') and bs_scheme.extra_files:
for extra in bs_scheme.extra_files:
bu.rsync_inject(extra, chroot)
bu.rsync_inject(extra, chroot)
if (hasattr(bs_scheme, 'root_ssh_authorized_file') and
bs_scheme.root_ssh_authorized_file):
LOG.debug('Put ssh auth file %s',
@ -889,8 +881,8 @@ class Manager(object):
except OSError:
LOG.debug('Finally: directory %s seems does not exist '
'or can not be removed', c_dir)
# TODO(kozhukalov): Split this huge method
# TODO(kozhukalov): Split this huge method
# into a set of smaller ones
# https://bugs.launchpad.net/fuel/+bug/1444090
def do_build_image(self):

View File

@ -0,0 +1,40 @@
#
# Copyright 2016 Cray Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from bareon.drivers.deploy.generic import GenericDeployDriver
from bareon.openstack.common import log as logging
from bareon.utils import utils
LOG = logging.getLogger(__name__)
class Rsync(GenericDeployDriver):
    """Deploy driver that copies OS images onto the target using rsync.

    Each image URI is expected to be an rsync-able source; its contents
    are synced into the mounted target tree at the path given by the
    image's target_device (a mount point here, not a block device).
    """

    def do_copyimage(self, os_id):
        """Rsync every image that belongs to *os_id* onto the target.

        :param os_id: identifier of the OS whose images are deployed
        """
        os_path = '/tmp/target/'
        # Pseudo filesystems (/sys, /dev, /proc) are not needed for a
        # plain file copy, hence pseudo=False; mtab handling is skipped
        # for the same reason.
        with self.mount_target(os_path, os_id, pseudo=False,
                               treat_mtab=False):
            for image in self.driver.image_scheme.get_os_images(os_id):
                target_image_path = os.path.join(
                    os_path, image.target_device.strip(os.sep))
                LOG.debug('Starting rsync from %s to %s', image.uri,
                          target_image_path)
                rsync_flags = image.deployment_flags.get('rsync_flags',
                                                         '-a -A -X')
                # BUG FIX: the flags must be separate argv elements.
                # Passing the single string '-a -A -X' as one argument
                # makes rsync exit with "unknown option".
                cmd = ['rsync'] + rsync_flags.split() + [
                    image.uri, target_image_path]
                utils.execute(*cmd, check_exit_code=[0])

View File

@ -0,0 +1,131 @@
#
# Copyright 2016 Cray Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from bareon.drivers.deploy.generic import GenericDeployDriver
from bareon import errors
from bareon.openstack.common import log as logging
from bareon.utils import artifact as au
from bareon.utils import fs as fu
from bareon.utils import utils
opts = [
cfg.StrOpt(
'image_build_suffix',
default='.bareon-image',
help='Suffix which is used while creating temporary files',
),
cfg.IntOpt(
'max_loop_devices_count',
default=255,
# NOTE(agordeev): up to 256 loop devices could be allocated up to
# kernel version 2.6.23, and the limit (from version 2.6.24 onwards)
# isn't theoretically present anymore.
help='Maximum allowed loop devices count to use'
),
cfg.IntOpt(
'sparse_file_size',
# XXX: Apparently Fuel configures the node root filesystem to span
# the whole hard drive. However 2 GB filesystem created with default
# options can grow at most to 2 TB (1024x its initial size). This
# maximal size can be configured by mke2fs -E resize=NNN option,
# however the version of e2fsprogs shipped with CentOS 6.[65] seems
# to silently ignore the `resize' option. Therefore make the initial
# filesystem a bit bigger so it can grow to 8 TB.
default=8192,
help='Size of sparse file in MiBs'
),
cfg.IntOpt(
'loop_device_major_number',
default=7,
help='System-wide major number for loop device'
),
cfg.IntOpt(
'fetch_packages_attempts',
default=10,
help='Maximum allowed debootstrap/apt-get attempts to execute'
),
cfg.StrOpt(
'allow_unsigned_file',
default='allow_unsigned_packages',
help='File where to store apt setting for unsigned packages'
),
cfg.StrOpt(
'force_ipv4_file',
default='force_ipv4',
help='File where to store apt setting for forcing IPv4 usage'
),
]
CONF = cfg.CONF
CONF.register_opts(opts)
LOG = logging.getLogger(__name__)
class Swift(GenericDeployDriver):
    """Deploy driver that streams OS images onto block devices.

    Despite the name, the image URI may be any http:// or file://
    source; the payload is streamed (optionally gunzipped) straight
    onto the block device backing the image's mount point.
    """

    def do_copyimage(self, os_id):
        """Copy every image belonging to *os_id* onto its target device.

        Per image, builds an artifact processing chain:
        uri -> (http|file) reader -> optional gunzip -> target device,
        verifies the md5 checksum when size and md5 are both provided,
        then grows supported file systems and regenerates their UUID.

        :param os_id: identifier of the OS whose images are deployed
        :raises: errors.ImageChecksumMismatchError on md5 mismatch
        """
        LOG.debug('--- Copying images (do_copyimage) ---')
        for image in self.driver.image_scheme.get_os_images(os_id):
            LOG.debug('Processing image: %s' % image.uri)
            processing = au.Chain()

            LOG.debug('Appending uri processor: %s' % image.uri)
            processing.append(image.uri)

            if image.uri.startswith('http://'):
                LOG.debug('Appending HTTP processor')
                processing.append(au.HttpUrl)
            elif image.uri.startswith('file://'):
                LOG.debug('Appending FILE processor')
                processing.append(au.LocalFile)

            if image.container == 'gzip':
                LOG.debug('Appending GZIP processor')
                processing.append(au.GunzipStream)

            LOG.debug('Appending TARGET processor: %s' % image.target_device)
            # image.target_device is a mount point; resolve it to the real
            # block device via the partition scheme before writing.
            target = self.driver.partition_scheme.fs_by_mount(
                image.target_device, os_id=os_id).device
            processing.append(target)

            LOG.debug('Launching image processing chain')
            processing.process()

            if image.size and image.md5:
                LOG.debug('Trying to compare image checksum')
                # NOTE(review): the checksum below and extend_fs further
                # down operate on image.target_device (the mount point)
                # while the data was written to `target` (the device) --
                # confirm this is intentional.
                actual_md5 = utils.calculate_md5(image.target_device,
                                                 image.size)
                if actual_md5 == image.md5:
                    LOG.debug('Checksum matches successfully: md5=%s' %
                              actual_md5)
                else:
                    raise errors.ImageChecksumMismatchError(
                        'Actual checksum %s mismatches with expected %s for '
                        'file %s' % (actual_md5, image.md5,
                                     image.target_device))
            else:
                LOG.debug('Skipping image checksum comparing. '
                          'Ether size or hash have been missed')

            LOG.debug('Extending image file systems')
            if image.format in ('ext2', 'ext3', 'ext4', 'xfs'):
                LOG.debug('Extending %s %s' %
                          (image.format, image.target_device))
                fu.extend_fs(image.format, image.target_device)
            # Give the freshly written file system a new UUID so clones of
            # the same image do not collide.
            fu.change_uuid(target)

View File

@ -1,149 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jsonschema
from bareon import errors
from bareon.openstack.common import log as logging
LOG = logging.getLogger(__name__)
KS_SPACES_SCHEMA = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'title': 'Partition scheme',
'type': 'array',
'minItems': 1,
'uniqueItems': True,
'items': {
'anyOf': [
{
'type': 'object',
'required': ['type', 'id', 'volumes', 'name',
'size', 'extra', 'free_space'],
'properties': {
'type': {'enum': ['disk']},
'id': {'type': 'string'},
'name': {'type': 'string'},
'size': {'type': 'integer'},
'free_space': {'type': 'integer'},
'extra': {
'type': 'array',
'items': {'type': 'string'},
},
'volumes': {
'type': 'array',
'items': {
'anyOf': [
{
'type': 'object',
'required': ['type', 'size',
'lvm_meta_size', 'vg'],
'properties': {
'type': {'enum': ['pv']},
'size': {'type': 'integer'},
'lvm_meta_size': {'type': 'integer'},
'vg': {'type': 'string'}
}
},
{
'type': 'object',
'required': ['type', 'size'],
'properties': {
'type': {'enum': ['raid',
'partition']},
'size': {'type': 'integer'},
'mount': {'type': 'string'},
'file_system': {'type': 'string'},
'name': {'type': 'string'}
}
},
{
'type': 'object',
'required': ['type', 'size'],
'properties': {
'type': {'enum': ['boot']},
'size': {'type': 'integer'}
}
},
{
'type': 'object',
'required': ['type', 'size'],
'properties': {
'type': {'enum': ['lvm_meta_pool']},
'size': {'type': 'integer'}
}
},
]
}
}
}
},
{
'type': 'object',
'required': ['type', 'id', 'volumes'],
'properties': {
'type': {'enum': ['vg']},
'id': {'type': 'string'},
'label': {'type': 'string'},
'min_size': {'type': 'integer'},
'_allocate_size': {'type': 'string'},
'volumes': {
'type': 'array',
'items': {
'type': 'object',
'required': ['type', 'size', 'name'],
'properties': {
'type': {'enum': ['lv']},
'size': {'type': 'integer'},
'name': {'type': 'string'},
'mount': {'type': 'string'},
'file_system': {'type': 'string'},
}
}
}
}
}
]
}
}
def validate(scheme):
    """Validate a given partition scheme using jsonschema.

    :param scheme: partition scheme to validate (list of spaces)
    :raises: errors.WrongPartitionSchemeError if the scheme does not
             match KS_SPACES_SCHEMA, contains no disks, or asks for a
             root file system larger than 16T
    """
    try:
        checker = jsonschema.FormatChecker()
        jsonschema.validate(scheme, KS_SPACES_SCHEMA,
                            format_checker=checker)
    except Exception as exc:
        LOG.exception(exc)
        raise errors.WrongPartitionSchemeError(str(exc))

    # scheme is not valid if the number of disks is 0
    if not [d for d in scheme if d['type'] == 'disk']:
        raise errors.WrongPartitionSchemeError(
            'Partition scheme seems empty')

    for space in scheme:
        for volume in space.get('volumes', []):
            # BUG FIX: 'mount' is optional for most volume types
            # (pv, boot, lvm_meta_pool have no mount point), so
            # volume['mount'] raised KeyError here. Check the mount
            # first via .get() and only then compare the size.
            if volume.get('mount') == '/' and volume['size'] > 16777216:
                raise errors.WrongPartitionSchemeError(
                    'Root file system must be less than 16T')

    # TODO(kozhukalov): need to have additional logical verifications
    # maybe sizes and format of string values

View File

@ -27,6 +27,18 @@ class WrongPartitionSchemeError(BaseError):
pass
class WrongPartitionPolicyError(BaseError):
    """Presumably raised for an unknown/unsupported partition policy.

    NOTE(review): meaning inferred from the name -- confirm at raise
    sites.
    """
    pass


class PartitionSchemeMismatchError(BaseError):
    """Presumably raised when the requested partition scheme does not
    match the scheme found on disk -- confirm at raise sites."""
    pass


class HardwarePartitionSchemeCannotBeReadError(BaseError):
    """Presumably raised when the on-disk partition layout cannot be
    read back -- confirm at raise sites."""
    pass


class WrongPartitionLabelError(BaseError):
    """Presumably raised for an invalid partition table label --
    confirm at raise sites."""
    pass

View File

@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from bareon import errors
@ -19,7 +20,9 @@ class Image(object):
SUPPORTED_CONTAINERS = ['raw', 'gzip']
def __init__(self, uri, target_device,
format, container, size=None, md5=None):
format, container, size=None, md5=None, os_id=None,
os_boot=False, image_name='', image_uuid='',
deployment_flags={}):
# uri is something like
# http://host:port/path/to/image.img or
# file:///tmp/image.img
@ -35,6 +38,11 @@ class Image(object):
self.size = size
self.md5 = md5
self.img_tmp_file = None
self.os_id = os_id
self.os_boot = os_boot
self.image_name = image_name
self.image_uuid = image_uuid
self.deployment_flags = deployment_flags
class ImageScheme(object):
@ -43,3 +51,16 @@ class ImageScheme(object):
def add_image(self, **kwargs):
self.images.append(Image(**kwargs))
def get_images_sorted_by_depth(self, os_id=None, reverse=False):
    """Return images sorted by target-path depth (shallower first).

    :param os_id: optional OS identifier used to filter the images
    :param reverse: when True, sort deepest paths first
    :returns: list of Image objects
    """
    def key(image):
        return image.target_device.rstrip(os.path.sep).count(os.path.sep)
    # BUG FIX: forward os_id -- the original called get_os_images()
    # without it, so the os_id argument was silently ignored and all
    # images were returned regardless of the requested OS.
    return sorted(self.get_os_images(os_id), key=key, reverse=reverse)
def get_os_images(self, os_id=None):
    """Return the images that belong to *os_id*.

    :param os_id: OS identifier; when falsy, all images are returned
    :returns: filter() result (a list on Python 2) of matching images
    """
    if os_id:
        return filter(lambda img: os_id in img.os_id, self.images)
    return self.images


def get_os_root(self, os_id=None):
    """Return the image targeted at '/' for the given OS.

    NOTE(review): next() is called without a default, so this raises
    StopIteration when no root image exists -- confirm callers expect
    that rather than None.
    """
    images = self.get_os_images(os_id)
    return next((image for image in images if image.target_device == '/'))

View File

@ -18,14 +18,18 @@ from bareon.objects import base
class FileSystem(base.Serializable):
def __init__(self, device, mount=None, fs_type=None,
fs_options=None, fs_label=None, keep_data=False):
def __init__(self, device, mount=None, fs_type=None, fs_options=None,
             fs_label=None, keep_data=False, fstab_enabled=True,
             fstab_options='defaults', os_id=None):
    """File system description.

    :param device: block device the file system lives on
    :param mount: mount point (None when not mounted)
    :param fs_type: file system type; defaults to 'xfs' only when None
        (empty string is preserved as-is)
    :param fs_options: mkfs options string
    :param fs_label: file system label
    :param keep_data: do not reformat, preserve existing data
    :param fstab_enabled: whether to emit an /etc/fstab entry
    :param fstab_options: options column for the fstab entry
    :param os_id: list of OS identifiers this file system belongs to
    """
    self.keep_data = keep_data
    self.device = device
    self.mount = mount
    # Empty string is a legal fs type, so only fall back on None.
    self.type = fs_type if (fs_type is not None) else 'xfs'
    self.options = fs_options or ''
    self.fstab_options = fstab_options
    self.label = fs_label or ''
    self.fstab_enabled = fstab_enabled
    # BUG FIX: the original default os_id=[] was a mutable default
    # argument shared between every FileSystem instance; mutating one
    # instance's os_id would leak into all others. Use a None sentinel
    # and create a fresh list per object instead.
    self.os_id = os_id if os_id is not None else []
def to_dict(self):
return {
@ -35,4 +39,7 @@ class FileSystem(base.Serializable):
'fs_options': self.options,
'fs_label': self.label,
'keep_data': self.keep_data,
'fstab_enabled': self.fstab_enabled,
'fstab_options': self.fstab_options,
'os_id': self.os_id,
}

View File

@ -25,11 +25,13 @@ LOG = logging.getLogger(__name__)
class Parted(base.Serializable):
def __init__(self, name, label, partitions=None, install_bootloader=False):
def __init__(self, name, label, partitions=None, install_bootloader=False,
             disk_size=None):
    """Parted disk description.

    :param name: device name (e.g. /dev/sda)
    :param label: partition table label (e.g. gpt, msdos)
    :param partitions: optional list of partition objects
    :param install_bootloader: whether a bootloader goes on this disk
    :param disk_size: total disk size; the unit is not visible here --
        confirm against the data driver that populates it
    """
    self.name = name
    self.label = label
    self.partitions = partitions or []
    self.install_bootloader = install_bootloader
    self.disk_size = disk_size
def add_partition(self, **kwargs):
# TODO(kozhukalov): validate before appending
@ -111,6 +113,7 @@ class Parted(base.Serializable):
'label': self.label,
'partitions': partitions,
'install_bootloader': self.install_bootloader,
'disk_size': self.disk_size
}
@classmethod

View File

@ -126,13 +126,17 @@ class PartitionScheme(object):
metadatacopies=metadatacopies)
vg.add_pv(pv.name)
def fs_by_mount(self, mount):
return next((x for x in self.fss if x.mount == mount), None)
def fs_by_mount(self, mount, os_id=None):
    """Return the first file system mounted at *mount*.

    :param mount: mount point to look up
    :param os_id: optionally restrict the search to file systems that
        belong to this OS
    :returns: the matching file system, or None when nothing matches
    """
    # BUG FIX (portability/robustness): the original used filter() and
    # then `if found: return found[0]`. Under Python 3, filter()
    # returns a lazy, always-truthy, non-subscriptable object, which
    # breaks both the truthiness test and the indexing. List
    # comprehensions behave identically on Python 2 and 3.
    found = [x for x in self.fss if x.mount and x.mount == mount]
    if os_id:
        found = [x for x in found if x.os_id and os_id in x.os_id]
    if found:
        return found[0]
    # Implicitly returns None when there is no match.
def fs_by_device(self, device):
return next((x for x in self.fss if x.device == device), None)
def fs_sorted_by_depth(self, reverse=False):
def fs_sorted_by_depth(self, os_id=None, reverse=False):
"""Getting file systems sorted by path length.
Shorter paths earlier.
@ -141,7 +145,18 @@ class PartitionScheme(object):
"""
def key(x):
return x.mount.rstrip(os.path.sep).count(os.path.sep)
return sorted(self.fss, key=key, reverse=reverse)
sorted_fss = sorted(self.fss, key=key, reverse=reverse)
return filter(lambda fs: self._os_filter(fs, os_id), sorted_fss)
def _os_filter(self, file_system, os_id):
    """Tell whether *file_system* belongs to *os_id*.

    A falsy os_id disables filtering: every file system matches.
    """
    if os_id:
        return os_id in file_system.os_id
    else:
        return True


def fs_by_os_id(self, os_id):
    """Return the file systems that belong to the given OS.

    :returns: filter() result (a list on Python 2)
    """
    return filter(lambda fs: self._os_filter(fs, os_id), self.fss)
def lv_by_device_name(self, device_name):
return next((x for x in self.lvs if x.device_name == device_name),

View File

@ -153,12 +153,14 @@ class BuildUtilsTestCase(unittest2.TestCase):
self.assertEqual([mock.call('chroot', f) for f in files],
mock_path.join.call_args_list)
@unittest2.skip("Fix after cray rebase")
@mock.patch.object(bu, 'remove_files')
@mock.patch.object(bu, 'clean_dirs')
def test_clean_apt_settings(self, mock_dirs, mock_files):
bu.clean_apt_settings('chroot', 'unsigned', 'force_ipv4')
mock_dirs.assert_called_once_with(
'chroot', ['etc/apt/preferences.d', 'etc/apt/sources.list.d'])
files = set(['etc/apt/sources.list', 'etc/apt/preferences',
'etc/apt/apt.conf.d/%s' % 'force_ipv4',
'etc/apt/apt.conf.d/%s' % 'unsigned',
@ -168,6 +170,11 @@ class BuildUtilsTestCase(unittest2.TestCase):
self.assertEqual('chroot', mock_files.call_args[0][0])
self.assertEqual(files, set(mock_files.call_args[0][1]))
mock_files.assert_called_once_with(
'chroot', ['etc/apt/sources.list', 'etc/apt/preferences',
'etc/apt/apt.conf.d/%s' % 'force_ipv4',
'etc/apt/apt.conf.d/%s' % 'unsigned'])
@mock.patch('bareon.utils.build.open',
create=True, new_callable=mock.mock_open)
@mock.patch('bareon.utils.build.os.path')

View File

@ -0,0 +1,181 @@
#
# Copyright 2016 Cray Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest2
from bareon.drivers.data import generic
class TestKsDisks(unittest2.TestCase):
    """Unit tests for GenericDataDriver._ks_disks.

    The driver's _partition_data callable is replaced with a MagicMock
    so every test can feed arbitrary partition data. unittest creates a
    fresh TestCase instance per test method, so the mock does not leak
    state between tests.
    """

    def __init__(self, *args, **kwargs):
        super(TestKsDisks, self).__init__(*args, **kwargs)
        self.driver = generic.GenericDataDriver(None)
        self.driver._partition_data = self.mock_part_data = mock.MagicMock()

    def test_no_partition_data(self):
        # No spaces at all -> no disks.
        self.mock_part_data.return_value = []
        desired = []

        result = self.driver._ks_disks

        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()

    def test_no_partitions_valid_size(self):
        # Disks with non-positive sizes are filtered out.
        self.mock_part_data.return_value = [
            {'size': -100, 'type': 'disk'},
            {'size': 0, 'type': 'disk'}
        ]
        desired = []

        result = self.driver._ks_disks

        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()

    def test_no_partitions_valid_type(self):
        # Non-disk spaces (vg, pv) are not reported as disks.
        self.mock_part_data.return_value = [
            {'size': 100, 'type': 'vg'},
            {'size': 200, 'type': 'pv'}
        ]
        desired = []

        result = self.driver._ks_disks

        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()

    def test_valid_data(self):
        # Mixed spaces -> only positive-size 'disk' entries survive.
        self.mock_part_data.return_value = [
            {'size': 100, 'type': 'vg'},
            {'size': 200, 'type': 'disk'}
        ]
        desired = [{'size': 200, 'type': 'disk'}]

        result = self.driver._ks_disks

        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()
class TestKsVgs(unittest2.TestCase):
    """Unit tests for GenericDataDriver._ks_vgs (volume-group spaces)."""

    def __init__(self, *args, **kwargs):
        super(TestKsVgs, self).__init__(*args, **kwargs)
        self.driver = generic.GenericDataDriver(None)
        self.driver._partition_data = self.mock_part_data = mock.MagicMock()

    def test_no_partition_data(self):
        # Empty partition data -> no vgs.
        self.mock_part_data.return_value = []
        desired = []

        result = self.driver._ks_vgs

        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()

    def test_no_partitions_valid_type(self):
        # Only non-vg spaces present -> nothing returned.
        self.mock_part_data.return_value = [
            {'type': 'disk'},
            {'type': 'pv'}
        ]
        desired = []

        result = self.driver._ks_vgs

        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()

    def test_valid_data(self):
        # Mixed spaces -> only the 'vg' entries survive.
        self.mock_part_data.return_value = [
            {'type': 'vg'},
            {'type': 'disk'}
        ]
        desired = [{'type': 'vg'}]

        result = self.driver._ks_vgs

        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()
class TestSmallKsDisks(unittest2.TestCase):
    """Unit tests for GenericDataDriver._small_ks_disks.

    Sizes are in MiB: 3 and 5 TiB disks are excluded while a 1 TiB disk
    is kept, so the "small" cut-off is presumably 2 TiB -- confirm
    against the driver implementation.
    """

    def __init__(self, *args, **kwargs):
        super(TestSmallKsDisks, self).__init__(*args, **kwargs)
        self.driver = generic.GenericDataDriver(None)
        self.driver._partition_data = self.mock_part_data = mock.MagicMock()

    def test_no_partition_data(self):
        # No spaces at all -> no small disks.
        self.mock_part_data.return_value = []
        desired = []

        result = self.driver._small_ks_disks

        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()

    def test_no_partitions_valid_size(self):
        # All disks above the threshold -> nothing returned.
        self.mock_part_data.return_value = [
            {'size': 3 * 1024 * 1024, 'type': 'disk'},
            {'size': 5 * 1024 * 1024, 'type': 'disk'}
        ]
        desired = []

        result = self.driver._small_ks_disks

        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()

    def test_valid_data(self):
        # Only disk-typed spaces below the threshold survive.
        self.mock_part_data.return_value = [
            {'size': 3 * 1024 * 1024, 'type': 'vg'},
            {'size': 1 * 1024 * 1024, 'type': 'disk'}
        ]
        desired = [{'size': 1 * 1024 * 1024, 'type': 'disk'}]

        result = self.driver._small_ks_disks

        self.assertEqual(result, desired)
        self.mock_part_data.assert_called_once_with()
class TestGetLabel(unittest2.TestCase):
    """Unit tests for GenericDataDriver._getlabel (mkfs -L argument)."""

    def __init__(self, *args, **kwargs):
        super(TestGetLabel, self).__init__(*args, **kwargs)
        self.driver = generic.GenericDataDriver(None)

    def test_no_label(self):
        # A falsy label produces an empty string (no -L flag at all).
        label = None
        desired = ''

        result = self.driver._getlabel(label)

        self.assertEqual(result, desired)

    def test_long_label(self):
        # Labels are truncated to 12 characters.
        label = 'l' * 100
        desired = ' -L {0} '.format('l' * 12)

        result = self.driver._getlabel(label)

        self.assertEqual(result, desired)

    def test_valid_label(self):
        # Short labels are passed through unchanged.
        label = 'label'
        desired = ' -L {0} '.format(label)

        result = self.driver._getlabel(label)

        self.assertEqual(result, desired)

View File

@ -0,0 +1,732 @@
#
# Copyright 2016 Cray Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import mock
import os
import unittest2
from oslo_config import cfg
from bareon.drivers.deploy import generic
from bareon import errors
from bareon.objects.partition.fs import FileSystem
CONF = cfg.CONF
class TestDoReboot(unittest2.TestCase):
    """Unit test for GenericDeployDriver.do_reboot."""

    def __init__(self, *args, **kwargs):
        super(TestDoReboot, self).__init__(*args, **kwargs)
        # do_reboot does not touch the data driver, so None is enough.
        self.driver = generic.GenericDeployDriver(None)

    @mock.patch('bareon.utils.utils.execute')
    def test_do_reboot(self, mock_execute):
        result = self.driver.do_reboot()

        self.assertEqual(result, None)
        mock_execute.assert_called_once_with('reboot')
@unittest2.skip("Fix after cray rebase")
class TestDoProvision(unittest2.TestCase):
def __init__(self, *args, **kwargs):
super(TestDoProvision, self).__init__(*args, **kwargs)
self.driver = generic.GenericDeployDriver(None)
def test_do_provision(self):
self.driver.do_partitioning = mock_partitioning = mock.MagicMock()
self.driver.do_configdrive = mock_configdrive = mock.MagicMock()
self.driver.do_copyimage = mock_copyimage = mock.MagicMock()
self.driver.do_bootloader = mock_bootloader = mock.MagicMock()
result = self.driver.do_provisioning()
self.assertEqual(result, None)
mock_partitioning.assert_called_once_with()
mock_configdrive.assert_called_once_with()
mock_copyimage.assert_called_once_with()
mock_bootloader.assert_called_once_with()
class TestDoConfigDrive(unittest2.TestCase):
    """Unit test for GenericDeployDriver.do_configdrive."""

    def __init__(self, *args, **kwargs):
        super(TestDoConfigDrive, self).__init__(*args, **kwargs)
        self.mock_data_driver = mock.MagicMock()
        self.driver = generic.GenericDeployDriver(self.mock_data_driver)

    def test_do_configdrive(self):
        # do_configdrive simply delegates to the data driver.
        result = self.driver.do_configdrive()

        self.assertEqual(result, None)
        self.mock_data_driver.create_configdrive.assert_called_once_with()
class TestMountTarget(unittest2.TestCase):
    """Unit tests for GenericDeployDriver._mount_target.

    _mount_target(chroot, os_id, pseudo, treat_mtab) mounts the target
    file systems under the chroot, optionally bind-mounting the pseudo
    file systems (/sys, /dev, /proc) and rewriting /etc/mtab.
    """

    def __init__(self, *args, **kwargs):
        super(TestMountTarget, self).__init__(*args, **kwargs)
        self.mock_data = mock.MagicMock()
        self.driver = generic.GenericDeployDriver(self.mock_data)
        # Shortcut to the mocked partition_scheme.fs_sorted_by_depth.
        self.fs_sorted = self.mock_data.partition_scheme.fs_sorted_by_depth

    @mock.patch('bareon.utils.fs.mount_bind')
    @mock.patch('bareon.utils.utils.makedirs_if_not_exists')
    def test_pseudo(self, mock_makedirs, mock_mount_bind):
        # pseudo=True -> /sys, /dev, /proc are created and bind-mounted.
        self.fs_sorted.return_value = []
        pseudo_fs = ('/sys', '/dev', '/proc')
        chroot = '/tmp/target'
        os_id = 'test'

        result = self.driver._mount_target(chroot, os_id, True,
                                           False)

        self.assertEqual(result, None)
        mock_makedirs.assert_has_calls([mock.call(chroot + path)
                                        for path in pseudo_fs],
                                       any_order=True)
        mock_mount_bind.assert_has_calls([mock.call(chroot, path)
                                          for path in pseudo_fs],
                                         any_order=True)
        self.fs_sorted.assert_called_once_with(os_id)

    @mock.patch('os.path.islink', return_value=False)
    @mock.patch('bareon.utils.utils.execute')
    @mock.patch('__builtin__.open')
    def test_treat_mtab_no_link(self, mock_open, mock_execute, mock_islink):
        # treat_mtab=True and /etc/mtab is a regular file: contents are
        # rewritten from /proc/mounts (minus rootfs).
        chroot = '/tmp/target'
        os_id = 'test'
        mock_open.return_value = context_manager = mock.MagicMock()
        context_manager.__enter__.return_value = file_mock = mock.MagicMock()
        mock_execute.return_value = mtab = ('mtab',)

        result = self.driver._mount_target(chroot, os_id, False,
                                           True)

        self.assertEqual(result, None)
        mock_execute.assert_called_once_with('chroot', chroot, 'grep', '-v',
                                             'rootfs', '/proc/mounts')
        mock_islink.assert_called_once_with(chroot + '/etc/mtab')
        file_mock.assert_has_calls([mock.call.write(mtab[0])], any_order=True)

    @mock.patch('os.remove')
    @mock.patch('os.path.islink', return_value=True)
    @mock.patch('bareon.utils.utils.execute')
    @mock.patch('__builtin__.open')
    def test_treat_mtab_link(self, mock_open, mock_execute, mock_islink,
                             mock_remove):
        # treat_mtab=True and /etc/mtab is a symlink: the link is removed
        # before the rewrite.
        chroot = '/tmp/target'
        os_id = 'test'
        mock_open.return_value = context_manager = mock.MagicMock()
        context_manager.__enter__.return_value = file_mock = mock.MagicMock()
        mock_execute.return_value = mtab = ('mtab',)

        result = self.driver._mount_target(chroot, os_id, False,
                                           True)

        self.assertEqual(result, None)
        mock_execute.assert_called_once_with('chroot', chroot, 'grep', '-v',
                                             'rootfs', '/proc/mounts')
        mock_islink.assert_called_once_with(chroot + '/etc/mtab')
        mock_remove.assert_called_once_with(chroot + '/etc/mtab')
        file_mock.assert_has_calls([mock.call.write(mtab[0])], any_order=True)

    @mock.patch('bareon.utils.fs.mount_fs')
    @mock.patch('bareon.utils.utils.makedirs_if_not_exists')
    def test_partition_swap(self, mock_makedirs, mock_mount):
        # swap entries are skipped when mounting (fss[1:]).
        chroot = '/tmp/target/'
        os_id = 'test'
        fs = namedtuple('fs', 'mount type device')
        fss = [fs(mount='swap', type='swap', device='/dev/sdc'),
               fs(mount='/', type='ext4', device='/dev/sda'),
               fs(mount='/usr', type='ext4', device='/dev/sdb')]
        self.fs_sorted.return_value = fss

        result = self.driver._mount_target(chroot, os_id, False,
                                           False)

        self.assertEqual(result, None)
        mock_makedirs.assert_has_calls(
            [mock.call(os.path.join(chroot, f.mount.strip(os.sep))) for f
             in fss[1:]], any_order=True)
        mock_mount.assert_has_calls(
            [mock.call(f.type, str(f.device),
                       os.path.join(chroot, f.mount.strip(os.sep))) for f
             in fss[1:]], any_order=True)
        self.fs_sorted.assert_called_once_with(os_id)

    @mock.patch('bareon.utils.fs.mount_fs')
    @mock.patch('bareon.utils.utils.makedirs_if_not_exists')
    def test_partition(self, mock_makedirs, mock_mount):
        # Plain file systems: each is created and mounted in depth order.
        chroot = '/tmp/target/'
        os_id = 'test'
        fs = namedtuple('fs', 'mount type device')
        fss = [fs(mount='/', type='ext4', device='/dev/sda'),
               fs(mount='/usr', type='ext4', device='/dev/sdb')]
        self.fs_sorted.return_value = fss

        result = self.driver._mount_target(chroot, os_id, False,
                                           False)

        self.assertEqual(result, None)
        mock_makedirs.assert_has_calls(
            [mock.call(os.path.join(chroot, f.mount.strip(os.sep))) for f
             in fss], any_order=True)
        mock_mount.assert_has_calls(
            [mock.call(f.type, str(f.device),
                       os.path.join(chroot, f.mount.strip(os.sep))) for f
             in fss], any_order=True)
        self.fs_sorted.assert_called_once_with(os_id)
class TestUmountTarget(unittest2.TestCase):
    """Unit tests for GenericDeployDriver._umount_target."""

    def __init__(self, *args, **kwargs):
        super(TestUmountTarget, self).__init__(*args, **kwargs)
        self.mock_data = mock.MagicMock()
        self.driver = generic.GenericDeployDriver(self.mock_data)
        self.fs_sorted = self.mock_data.partition_scheme.fs_sorted_by_depth

    @mock.patch('bareon.utils.fs.umount_fs')
    def test_pseudo(self, mock_umount_fs):
        # Pseudo file systems are lazily unmounted.
        # NOTE(review): called here as _umount_target(chroot, True) while
        # the other tests pass (chroot, os_id, pseudo) -- confirm the
        # intended signature/argument order.
        self.fs_sorted.return_value = []
        pseudo_fs = ('/sys', '/dev', '/proc')
        chroot = '/tmp/target'

        result = self.driver._umount_target(chroot, True)

        self.assertEqual(result, None)
        mock_umount_fs.assert_has_calls(
            [mock.call(chroot + path, try_lazy_umount=True) for path in
             pseudo_fs], any_order=True)

    @mock.patch('bareon.utils.fs.umount_fs')
    def test_partition(self, mock_umount):
        # File systems are unmounted deepest-first (reverse depth sort).
        chroot = '/tmp/target/'
        os_id = 'test'
        fs = namedtuple('fs', 'mount type device')
        fss = [fs(mount='/', type='ext4', device='/dev/sda'),
               fs(mount='/usr', type='ext4', device='/dev/sdb')]
        self.fs_sorted.return_value = fss

        result = self.driver._umount_target(chroot, os_id, False)

        self.assertEqual(result, None)
        mock_umount.assert_has_calls(
            [mock.call(os.path.join(chroot, f.mount.strip(os.sep))) for f
             in fss], any_order=True)
        self.fs_sorted.assert_called_once_with(os_id, True)

    @mock.patch('bareon.utils.fs.umount_fs')
    def test_partition_swap(self, mock_umount):
        # swap entries are skipped on unmount (fss[1:]).
        chroot = '/tmp/target/'
        os_id = 'test'
        fs = namedtuple('fs', 'mount type device')
        fss = [fs(mount='swap', type='swap', device='/dev/sdc'),
               fs(mount='/', type='ext4', device='/dev/sda'),
               fs(mount='/usr', type='ext4', device='/dev/sdb')]
        self.fs_sorted.return_value = fss

        result = self.driver._umount_target(chroot, os_id, False)

        self.assertEqual(result, None)
        mock_umount.assert_has_calls(
            [mock.call(os.path.join(chroot, f.mount.strip(os.sep))) for f
             in fss[1:]], any_order=True)
        self.fs_sorted.assert_called_once_with(os_id, True)
class TestDoBootloader(unittest2.TestCase):
def __init__(self, *args, **kwargs):
super(TestDoBootloader, self).__init__(*args, **kwargs)
self.mock_data = mock.MagicMock()
self.driver = generic.GenericDeployDriver(self.mock_data)
self.driver._generate_fstab = mock.MagicMock()
self.mock_mount = self.driver.mount_target = mock.MagicMock()
self.mock_umount = self.driver._umount_target = mock.MagicMock()
self.mock_grub = self.mock_data.grub
@mock.patch('bareon.utils.grub.grub2_install')
@mock.patch('bareon.utils.grub.grub2_cfg')
@mock.patch('bareon.utils.grub.guess_grub_version')
@mock.patch('bareon.utils.utils.execute')
def test_wrong_version_grub_2(self, mock_execute, mock_guess_grub,
mock_grub2_cfg, mock_grub2_install):
chroot = '/tmp/target'
os_id = 'test'
fs = namedtuple('fs', 'mount type device os_id')
fss = [fs(mount='swap', type='swap', device='/dev/sdc', os_id=[os_id]),
fs(mount='/', type='ext4', device='/dev/sda', os_id=[os_id]),
fs(mount='/usr', type='ext4', device='/dev/sdb', os_id=[os_id])]
self.mock_data.partition_scheme.fss = fss
self.mock_data.boot_on_lvm = mock.Mock()
mock_execute.side_effect = [('uuid1',), ('uuid2',), ('uuid3',)]
self.mock_grub.version = 1
mock_guess_grub.return_value = 2
self.mock_grub.kernel_name = 'kernel_name'
self.mock_grub.initrd_name = 'initrd_name'
self.mock_grub.kernel_params = kernel_params = 'params'
self.mock_data.partition_scheme.boot_device.return_value = fss[
1].device
result = self.driver.do_singleboot_bootloader(chroot, os_id)
self.assertEqual(result, None)
mock_grub2_cfg.assert_called_once_with(
kernel_params=kernel_params,
chroot=chroot,
grub_timeout=CONF.grub_timeout,
lvm_boot=self.mock_data.boot_on_lvm)
mock_grub2_install.assert_called_once_with(
[], chroot=chroot,
lvm_boot=self.mock_data.boot_on_lvm)
@mock.patch('bareon.utils.grub.grub1_install')
@mock.patch('bareon.utils.grub.grub1_cfg')
@mock.patch('bareon.utils.grub.guess_grub_version')
@mock.patch('bareon.utils.utils.execute')
def test_wrong_version_grub_1(self, mock_execute, mock_guess_grub,
mock_grub1_cfg, mock_grub1_install):
chroot = '/tmp/target'
os_id = 'test'
fs = namedtuple('fs', 'mount type device os_id')
fss = [fs(mount='swap', type='swap', device='/dev/sdc', os_id=[os_id]),
fs(mount='/', type='ext4', device='/dev/sda', os_id=[os_id]),
fs(mount='/usr', type='ext4', device='/dev/sdb', os_id=[os_id])]
self.mock_data.partition_scheme.fs_by_os_id.return_value = fss
self.mock_data.boot_on_lvm = None
mock_execute.side_effect = [('uuid1',), ('uuid2',), ('uuid3',)]
self.mock_grub.version = 2
mock_guess_grub.return_value = 1
self.mock_grub.kernel_name = kernel_name = 'kernel_name'
self.mock_grub.initrd_name = initrd_name = 'initrd_name'
self.mock_grub.kernel_params = kernel_params = 'params'
self.mock_data.partition_scheme.boot_device.return_value = fss[
1].device
result = self.driver.do_singleboot_bootloader(chroot, os_id)
self.assertEqual(result, None)
mock_grub1_cfg.assert_called_once_with(kernel=kernel_name,
initrd=initrd_name,
kernel_params=kernel_params,
chroot=chroot,
grub_timeout=CONF.grub_timeout)
mock_grub1_install.assert_called_once_with([], '/dev/sda',
chroot=chroot)
@mock.patch('bareon.utils.grub.grub2_install')
@mock.patch('bareon.utils.grub.grub2_cfg')
@mock.patch('bareon.utils.grub.guess_grub_version')
@mock.patch('bareon.utils.utils.execute')
def test_grub_2(self, mock_execute, mock_guess_grub, mock_grub2_cfg,
mock_grub2_install):
chroot = '/tmp/target'
os_id = 'test'
fs = namedtuple('fs', 'mount type device os_id')
fss = [fs(mount='swap', type='swap', device='/dev/sdc', os_id=[os_id]),
fs(mount='/', type='ext4', device='/dev/sda', os_id=[os_id]),
fs(mount='/usr', type='ext4', device='/dev/sdb', os_id=[os_id])]
self.mock_data.partition_scheme.fss = fss
self.mock_data.boot_on_lvm = None
mock_execute.side_effect = [('uuid1',), ('uuid2',), ('uuid3',)]
self.mock_grub.version = mock_guess_grub.return_value = 2
self.mock_grub.kernel_name = 'kernel_name'
self.mock_grub.initrd_name = 'initrd_name'
self.mock_grub.kernel_params = kernel_params = 'params'
self.mock_data.partition_scheme.boot_device.return_value = fss[
1].device
result = self.driver.do_singleboot_bootloader(chroot, os_id)
self.assertEqual(result, None)
mock_grub2_cfg.assert_called_once_with(kernel_params=kernel_params,
chroot=chroot,
grub_timeout=CONF.grub_timeout,
lvm_boot=None)
mock_grub2_install.assert_called_once_with([], chroot=chroot,
lvm_boot=None)
    @mock.patch('bareon.utils.grub.grub1_install')
    @mock.patch('bareon.utils.grub.grub1_cfg')
    @mock.patch('bareon.utils.grub.guess_grub_version')
    @mock.patch('bareon.utils.utils.execute')
    def test_grub_1(self, mock_execute, mock_guess_grub, mock_grub1_cfg,
                    mock_grub1_install):
        """GRUB1 path: cfg/install use the explicitly configured kernel."""
        chroot = '/tmp/target'
        os_id = 'test'
        fs = namedtuple('fs', 'mount type device os_id')
        fss = [fs(mount='swap', type='swap', device='/dev/sdc', os_id=[os_id]),
               fs(mount='/', type='ext4', device='/dev/sda', os_id=[os_id]),
               fs(mount='/usr', type='ext4', device='/dev/sdb', os_id=[os_id])]
        self.mock_data.partition_scheme.fs_by_os_id.return_value = fss
        # One UUID tuple per filesystem probed through utils.execute.
        mock_execute.side_effect = [('uuid1',), ('uuid2',), ('uuid3',)]
        self.mock_grub.version = mock_guess_grub.return_value = 1
        self.mock_grub.kernel_name = kernel_name = 'kernel_name'
        self.mock_grub.initrd_name = initrd_name = 'initrd_name'
        self.mock_grub.kernel_params = kernel_params = 'params'
        # The '/' filesystem's device is reported as the boot device.
        self.mock_data.partition_scheme.boot_device.return_value = fss[
            1].device
        self.mock_data.boot_on_lvm = None
        result = self.driver.do_singleboot_bootloader(chroot, os_id)
        self.assertEqual(result, None)
        mock_grub1_cfg.assert_called_once_with(kernel=kernel_name,
                                               initrd=initrd_name,
                                               kernel_params=kernel_params,
                                               chroot=chroot,
                                               grub_timeout=CONF.grub_timeout)
        mock_grub1_install.assert_called_once_with([], '/dev/sda',
                                                   chroot=chroot)
    @mock.patch('bareon.utils.grub.guess_initrd')
    @mock.patch('bareon.utils.grub.guess_kernel')
    @mock.patch('bareon.utils.grub.grub1_install')
    @mock.patch('bareon.utils.grub.grub1_cfg')
    @mock.patch('bareon.utils.grub.guess_grub_version')
    @mock.patch('bareon.utils.utils.execute')
    def test_grub1_nokernel_noinitrd(self, mock_execute, mock_guess_grub,
                                     mock_grub1_cfg, mock_grub1_install,
                                     mock_guess_kernel, mock_guess_initrd):
        """When kernel/initrd names are unset they are guessed by regexp."""
        chroot = '/tmp/target'
        os_id = 'test'
        fs = namedtuple('fs', 'mount type device os_id')
        fss = [fs(mount='swap', type='swap', device='/dev/sdc', os_id=[os_id]),
               fs(mount='/', type='ext4', device='/dev/sda', os_id=[os_id]),
               fs(mount='/usr', type='ext4', device='/dev/sdb', os_id=[os_id])]
        self.mock_data.partition_scheme.fs_by_os_id.return_value = fss
        # One UUID tuple per filesystem probed through utils.execute.
        mock_execute.side_effect = [('uuid1',), ('uuid2',), ('uuid3',)]
        self.mock_grub.version = mock_guess_grub.return_value = 1
        # No explicit names: the driver must fall back to the regexps.
        self.mock_grub.kernel_name = None
        self.mock_grub.initrd_name = None
        self.mock_grub.kernel_regexp = kernel_regex = 'kernel_regex'
        self.mock_grub.initrd_regexp = initrd_regex = 'initrd_regex'
        self.mock_grub.kernel_params = kernel_params = 'params'
        self.mock_data.partition_scheme.boot_device.return_value = fss[
            1].device
        self.mock_data.boot_on_lvm = None
        mock_guess_kernel.return_value = kernel_name = 'kernel_name'
        mock_guess_initrd.return_value = initrd_name = 'initrd_name'
        result = self.driver.do_singleboot_bootloader(chroot, os_id)
        self.assertEqual(result, None)
        mock_grub1_cfg.assert_called_once_with(kernel=kernel_name,
                                               initrd=initrd_name,
                                               kernel_params=kernel_params,
                                               chroot=chroot,
                                               grub_timeout=CONF.grub_timeout)
        mock_grub1_install.assert_called_once_with([], '/dev/sda',
                                                   chroot=chroot)
        mock_guess_kernel.assert_called_once_with(chroot=chroot,
                                                  regexp=kernel_regex)
        mock_guess_initrd.assert_called_once_with(chroot=chroot,
                                                  regexp=initrd_regex)
class TestGenerateFstab(unittest2.TestCase):
    """Tests for GenericDeployDriver.do_generate_fstab().

    Fix: mock.patch decorators are applied bottom-up, so the first injected
    argument is the mocked open() and the second is os.path.exists.  The
    second parameter was misleadingly named ``mock_execute``; it is renamed
    to ``mock_exists``.  ``assertEqual(result, None)`` is replaced with the
    idiomatic ``assertIsNone``.
    """

    def __init__(self, *args, **kwargs):
        super(TestGenerateFstab, self).__init__(*args, **kwargs)
        self.mock_data = mock.MagicMock()
        self.driver = generic.GenericDeployDriver(self.mock_data)

    # NOTE(review): '__builtin__.open' is a Python 2-only patch target;
    # 'builtins.open' is required on Python 3 -- confirm against the
    # OPEN_FUNCTION_NAME six-guard used elsewhere in this test suite.
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('__builtin__.open')
    def test_success(self, mock_open, mock_exists):
        """Each filesystem is written as 'UUID=<n> <mount> <type> ...'."""
        chroot = '/tmp/target'
        os_id = 'test'
        fss = [FileSystem('/dev/sdc', mount='swap', fs_type='swap',
                          os_id=[os_id]),
               FileSystem('/dev/sda', mount='/', fs_type='ext4',
                          os_id=[os_id]),
               FileSystem('/dev/sdb', mount='/usr', fs_type='ext4',
                          os_id=[os_id])]
        self.mock_data.partition_scheme.fs_by_os_id.return_value = fss
        self.driver._mount2uuid = mock_mount2uuid = mock.MagicMock()
        # Map each mount point to a fake, deterministic UUID (its index).
        mock_mount2uuid.return_value = {fs.mount: id for id, fs in
                                        enumerate(fss)}
        mock_open.return_value = context_manager = mock.MagicMock()
        context_manager.__enter__.return_value = file_mock = mock.MagicMock()
        result = self.driver.do_generate_fstab(chroot, 'test')
        self.assertIsNone(result)
        file_mock.assert_has_calls(
            [mock.call.write('UUID=0 swap swap defaults 0 0\n'),
             mock.call.write('UUID=1 / ext4 defaults 0 0\n'),
             mock.call.write('UUID=2 /usr ext4 defaults 0 0\n')],
            any_order=True)

    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('__builtin__.open')
    def test_fstab_disabled(self, mock_open, mock_exists):
        """Filesystems with fstab_enabled=False are written commented out."""
        chroot = '/tmp/target'
        os_id = 'test'
        fss = [FileSystem('/dev/sdc', mount='swap', fs_type='swap',
                          os_id=[os_id]),
               FileSystem('/dev/sda', mount='/', fs_type='ext4',
                          fstab_enabled=True, os_id=[os_id]),
               FileSystem('/dev/sdb', mount='/usr', fs_type='ext4',
                          fstab_enabled=False, os_id=[os_id])]
        self.mock_data.partition_scheme.fs_by_os_id.return_value = fss
        self.driver._mount2uuid = mock_mount2uuid = mock.MagicMock()
        mock_mount2uuid.return_value = {fs.mount: id for id, fs in
                                        enumerate(fss)}
        mock_open.return_value = context_manager = mock.MagicMock()
        context_manager.__enter__.return_value = file_mock = mock.MagicMock()
        result = self.driver.do_generate_fstab(chroot, 'test')
        self.assertIsNone(result)
        file_mock.assert_has_calls(
            [mock.call.write('UUID=0 swap swap defaults 0 0\n'),
             mock.call.write('UUID=1 / ext4 defaults 0 0\n'),
             mock.call.write('#UUID=2 /usr ext4 defaults 0 0\n')],
            any_order=True)

    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('__builtin__.open')
    def test_fstab_options(self, mock_open, mock_exists):
        """Custom fstab_options replace the 'defaults' mount options."""
        chroot = '/tmp/target'
        os_id = 'test'
        fss = [FileSystem('/dev/sdc', mount='swap', fs_type='swap',
                          os_id=[os_id]),
               FileSystem('/dev/sda', mount='/', fs_type='ext4',
                          fstab_options='defaults', os_id=[os_id]),
               FileSystem('/dev/sdb', mount='/usr', fs_type='ext4',
                          fstab_options='noatime', os_id=[os_id])]
        self.mock_data.partition_scheme.fs_by_os_id.return_value = fss
        self.driver._mount2uuid = mock_mount2uuid = mock.MagicMock()
        mock_mount2uuid.return_value = {fs.mount: id for id, fs in
                                        enumerate(fss)}
        mock_open.return_value = context_manager = mock.MagicMock()
        context_manager.__enter__.return_value = file_mock = mock.MagicMock()
        result = self.driver.do_generate_fstab(chroot, 'test')
        self.assertIsNone(result)
        file_mock.assert_has_calls(
            [mock.call.write('UUID=0 swap swap defaults 0 0\n'),
             mock.call.write('UUID=1 / ext4 defaults 0 0\n'),
             mock.call.write('UUID=2 /usr ext4 noatime 0 0\n')],
            any_order=True)
@mock.patch("bareon.drivers.deploy.generic.PartitionSchemaCompareTool")
class TestPolicyPartitioner(unittest2.TestCase):
def setup(self, policy, cmp_mock, keep_data_flag_present=False):
cmp_mock().configure_mock(_mock_unsafe=True)
cmp_mock().assert_no_diff.return_value = True
self.driver = mock.Mock(
partitions_policy=policy,
partition_scheme=mock.Mock(
**{'to_dict.return_value': {"test": 1},
'skip_partitioning': keep_data_flag_present}),
hw_partition_scheme=mock.Mock(
**{'to_dict.return_value': {"test": 2}}),
)
self.pp = generic.PolicyPartitioner(self.driver)
self.clean_fs_mock = self.pp._do_clean_filesystems = mock.Mock()
self.part_mock = self.pp._do_partitioning = mock.Mock()
def test_partition_verify(self, cmp_mock):
self.setup('verify', cmp_mock)
self.pp.partition()
cmp_mock().assert_no_diff.assert_called_once_with(
{'test': 1}, {'test': 2}
)
self.clean_fs_mock.assert_has_calls([])
self.part_mock.assert_has_calls([])
def test_partition_preserve(self, cmp_mock):
pass
def test_partition_nailgun_legacy_skip(self, cmp_mock):
self.setup('nailgun_legacy', cmp_mock,
keep_data_flag_present=True)
self.pp.partition()
self.clean_fs_mock.assert_called_once_with()
self.part_mock.assert_has_calls([])
def test_partition_nailgun_legacy_partition(self, cmp_mock):
self.setup('nailgun_legacy', cmp_mock,
keep_data_flag_present=False)
self.pp.partition()
self.clean_fs_mock.assert_has_calls([])
self.part_mock.assert_called_once_with()
def test_partition_clean(self, cmp_mock):
self.setup('clean', cmp_mock)
verify_mock = self.pp._verify_disk_size = mock.Mock()
self.pp.partition()
cmp_mock().assert_no_diff.assert_has_calls([])
self.clean_fs_mock.assert_has_calls([])
self.part_mock.assert_called_once_with()
verify_mock.assert_called_once_with(
self.driver.partition_scheme.parteds,
self.driver.hu_disks)
def test_unknown_policy(self, cmp_mock):
self.setup('non-existent', cmp_mock)
self.assertRaises(errors.WrongPartitionPolicyError,
self.pp.partition)
class TestPartitionSchemaCompareTool(unittest2.TestCase):
    """assert_no_diff() between a user-requested and a hardware schema."""

    def setUp(self):
        super(TestPartitionSchemaCompareTool, self).setUp()
        self.comp = generic.PartitionSchemaCompareTool()
        # Points to pay attention:
        # Some keep data flags are set, which are translated to False.
        self.user_schema = {
            'pvs': [], 'lvs': [],
            'fss': [
                {'keep_data': True, 'mount': u'/', 'fs_label': '',
                 'fs_type': u'ext4', 'fs_options': '', 'device': '/dev/vda2'},
                {'keep_data': True, 'mount': u'/usr', 'fs_label': '',
                 'fs_type': u'ext4', 'fs_options': '', 'device': '/dev/vda3'},
                {'keep_data': True, 'mount': u'swap', 'fs_label': '',
                 'fs_type': u'swap', 'fs_options': '', 'device': '/dev/vda4'},
            ],
            'parteds': [
                {'install_bootloader': True,
                 'partitions': [
                     {'count': 1, 'begin': 1, 'end': 25,
                      'name': '/dev/vda1', 'keep_data': False,
                      'device': '/dev/vda', 'flags': ['bios_grub'],
                      'guid': None, 'configdrive': False,
                      'partition_type': 'primary'},
                     {'count': 2, 'begin': 25, 'end': 4025,
                      'name': '/dev/vda2', 'keep_data': False,
                      'device': '/dev/vda', 'flags': [], 'guid': None,
                      'configdrive': False, 'partition_type': 'primary'},
                     {'count': 3, 'begin': 4025, 'end': 7025,
                      'name': '/dev/vda3', 'keep_data': True,
                      'device': '/dev/vda', 'flags': [], 'guid': None,
                      'configdrive': False,
                      'partition_type': 'primary'},
                     {'count': 3, 'begin': 7025, 'end': 8025,
                      'name': '/dev/vda4', 'keep_data': False,
                      'device': '/dev/vda', 'flags': [], 'guid': None,
                      'configdrive': False,
                      'partition_type': 'primary'}
                 ],
                 'name': '/dev/vda', 'label': 'gpt'}], 'mds': [], 'vgs': []}
        # Has extra disk - vdb, which is ignored.
        self.hw_schema = {
            'pvs': [], 'lvs': [],
            'fss': [
                {'keep_data': False, 'mount': '/', 'fs_label': '',
                 'fs_type': 'ext4', 'fs_options': '', 'device': '/dev/vda2'},
                {'keep_data': False, 'mount': '/usr', 'fs_label': '',
                 'fs_type': 'ext4', 'fs_options': '', 'device': '/dev/vda3'},
                {'keep_data': False, 'mount': 'swap', 'fs_label': '',
                 'fs_type': 'linux-swap(v1)', 'fs_options': '',
                 'device': '/dev/vda4'}
            ],
            'parteds': [
                {'install_bootloader': True,
                 'partitions': [
                     {'count': 1, 'begin': 1, 'end': 25,
                      'name': '/dev/vda1', 'keep_data': False,
                      'device': '/dev/vda', 'flags': ['bios_grub'],
                      'guid': None, 'configdrive': False,
                      'partition_type': 'primary'},
                     {'count': 2, 'begin': 25, 'end': 4025,
                      'name': '/dev/vda2', 'keep_data': False,
                      'device': '/dev/vda', 'flags': [], 'guid': None,
                      'configdrive': False, 'partition_type': 'primary'},
                     {'count': 3, 'begin': 4025, 'end': 7025,
                      'name': '/dev/vda3', 'keep_data': False,
                      'device': '/dev/vda', 'flags': [], 'guid': None,
                      'configdrive': False, 'partition_type': 'primary'},
                     {'count': 3, 'begin': 7025, 'end': 8025,
                      'name': '/dev/vda4', 'keep_data': False,
                      'device': '/dev/vda', 'flags': [], 'guid': None,
                      'configdrive': False,
                      'partition_type': 'primary'}
                 ],
                 'name': '/dev/vda', 'label': 'gpt'},
                {'install_bootloader': False,
                 'partitions': [
                     {'count': 1, 'begin': 0, 'end': 101,
                      'name': '/dev/vdb1', 'keep_data': False,
                      'device': '/dev/vdb', 'flags': [],
                      'guid': None, 'configdrive': False,
                      'partition_type': None}],
                 'name': '/dev/vdb', 'label': 'loop'}],
            'mds': [], 'vgs': []}

    def test_match(self):
        # Differences the tool tolerates (keep_data flags, swap fs_type
        # spelling, hw-only extra disk) do not raise.
        self.comp.assert_no_diff(self.user_schema, self.hw_schema)

    def test_mismatch_extra_part_in_user_schema(self):
        # A partition requested by the user but absent on hardware fails.
        self.user_schema['parteds'][0]['partitions'].append({
            'count': 3, 'begin': 4025, 'end': 7025,
            'name': '/dev/vda4', 'keep_data': False,
            'device': '/dev/vda', 'flags': [], 'guid': None,
            'configdrive': False, 'partition_type': 'primary'
        })
        self.assertRaises(errors.PartitionSchemeMismatchError,
                          self.comp.assert_no_diff,
                          self.user_schema, self.hw_schema)

    def test_mismatch_extra_disk_in_user_schema(self):
        # A whole disk requested by the user but absent on hardware fails.
        self.user_schema['parteds'].append({
            'install_bootloader': True,
            'partitions': [
                {'count': 1, 'begin': 0, 'end': 101,
                 'name': '/dev/vdc1', 'keep_data': True,
                 'device': '/dev/vdc', 'flags': [],
                 'guid': None, 'configdrive': False,
                 'partition_type': None}],
            'name': '/dev/vdc', 'label': 'loop'
        })
        self.assertRaises(errors.PartitionSchemeMismatchError,
                          self.comp.assert_no_diff,
                          self.user_schema, self.hw_schema)

    def test_mismatch_extra_part_on_hw_schema(self):
        # An extra partition on a user-managed hardware disk also fails.
        self.hw_schema['parteds'][0]['partitions'].append({
            'count': 3, 'begin': 4025, 'end': 7025,
            'name': '/dev/vda4', 'keep_data': False,
            'device': '/dev/vda', 'flags': [], 'guid': None,
            'configdrive': False, 'partition_type': 'primary'
        })
        self.assertRaises(errors.PartitionSchemeMismatchError,
                          self.comp.assert_no_diff,
                          self.user_schema, self.hw_schema)

View File

@ -12,15 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import io
import mock
import six
from six import StringIO
import unittest2
from bareon import errors
from bareon.utils import grub as gu
if six.PY2:
OPEN_FUNCTION_NAME = '__builtin__.open'
else:
OPEN_FUNCTION_NAME = 'builtins.open'
class TestGrubUtils(unittest2.TestCase):
@ -447,6 +453,7 @@ title Default (kernel-version-set)
gu.grub2_install(['/dev/foo', '/dev/bar'], chroot='/target')
self.assertEqual(mock_exec.call_args_list, expected_calls)
@unittest2.skip("Fix after cray rebase")
@mock.patch('bareon.utils.grub.guess_grub2_conf')
@mock.patch('bareon.utils.grub.guess_grub2_mkconfig')
@mock.patch('bareon.utils.grub.utils.execute')
@ -455,14 +462,10 @@ title Default (kernel-version-set)
mock_def.return_value = '/etc/default/grub'
mock_mkconfig.return_value = '/sbin/grub-mkconfig'
mock_conf.return_value = '/boot/grub/grub.cfg'
orig_content = """foo
GRUB_CMDLINE_LINUX="kernel-params-orig"
bar"""
new_content = """foo
GRUB_CMDLINE_LINUX="kernel-params-new"
bar
GRUB_RECORDFAIL_TIMEOUT=10
"""
with mock.patch('bareon.utils.grub.open',
new=mock.mock_open(read_data=orig_content),
@ -479,7 +482,9 @@ GRUB_RECORDFAIL_TIMEOUT=10
mock.call('/target/etc/default/grub', 'wt', encoding='utf-8')]
)
handle.write.assert_called_once_with(new_content)
gu.grub2_cfg(kernel_params='kernel-params-new', chroot='/target',
grub_timeout=10)
mock_exec.assert_called_once_with('chroot', '/target',
'/sbin/grub-mkconfig',
'-o', '/boot/grub/grub.cfg',

View File

@ -271,9 +271,11 @@ supports-register-dump: yes
# should return False if udev MAJOR is not in a list of
# major numbers which are used for disks
# look at kernel/Documentation/devices.txt
# KVM virtio volumes have major number 254 in Debian
mock_breport.return_value = {}
valid_majors = [3, 8, 9, 65, 66, 67, 68, 69, 70, 71, 104, 105,
106, 107, 108, 109, 110, 111, 202, 252, 253, 259]
valid_majors = [3, 8, 9, 259, 65, 66, 67, 68, 69, 70, 71, 202, 104,
105, 106, 107, 108, 109, 110, 111, 252, 253, 254]
for major in (set(range(1, 261)) - set(valid_majors)):
uspec = {
'MAJOR': str(major)
@ -328,6 +330,17 @@ E: MINOR=0
E: SUBSYSTEM=block
E: USEC_INITIALIZED=87744
P: /devices/virtual/block/dm-0
N: dm-0
E: DEVNAME=/dev/dm-0
E: DEVPATH=/devices/vertual/block/dm-0
E: DEVTYPE=disk
E: MAJOR=259
E: MINOR=0
E: SUBSYSTEM=block
E: DM_VG_NAME=swap
E: USEC_INITIALIZED=87744
P: /devices/pci0000:00/0000:00:1c.1/target16:0:0/16:0:0:0/block/sr0
E: DEVTYPE=disk
E: DEVNAME=/dev/sr0
@ -347,9 +360,81 @@ E: SUBSYSTEM=block
E: MAJOR=8
E: UDEV_LOG=3""", '')
self.assertEqual(['/dev/sda', '/dev/nvme0n1', '/dev/sda1'],
self.assertEqual(['/dev/sda', '/dev/nvme0n1'],
hu.get_block_devices_from_udev_db())
@mock.patch('bareon.utils.hardware.utils.execute')
def test_get_vg_devices_from_udev_db(self, mock_exec):
mock_exec.return_value = ("""P: /devices/virtual/block/loop0
N: loop0
E: DEVNAME=/dev/loop0
E: DEVPATH=/devices/virtual/block/loop0
E: DEVTYPE=disk
E: MAJOR=7
E: SUBSYSTEM=block
P: /devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sda
N: sda
S: disk/by-id/wwn-0x5000c5004008ac0f
S: disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0
E: DEVNAME=/dev/sda
E: DEVTYPE=disk
E: ID_ATA=1
E: MAJOR=8
E: SUBSYSTEM=block
E: UDEV_LOG=3
P: /devices/pci:00/:00:04.0/misc/nvme0
N: nvme0
E: DEVNAME=/dev/nvme0
E: DEVPATH=/devices/pci:00/:00:04.0/misc/nvme0
E: MAJOR=10
E: MINOR=57
E: SUBSYSTEM=misc
P: /devices/pci:00/:00:04.0/block/nvme0n1
N: nvme0n1
E: DEVNAME=/dev/nvme0n1
E: DEVPATH=/devices/pci:00/:00:04.0/block/nvme0n1
E: DEVTYPE=disk
E: MAJOR=259
E: MINOR=0
E: SUBSYSTEM=block
E: USEC_INITIALIZED=87744
P: /devices/virtual/block/dm-0
N: dm-0
E: DEVNAME=/dev/dm-0
E: DEVPATH=/devices/vertual/block/dm-0
E: DEVTYPE=disk
E: MAJOR=259
E: MINOR=0
E: SUBSYSTEM=block
E: DM_VG_NAME=swap
E: USEC_INITIALIZED=87744
P: /devices/pci0000:00/0000:00:1c.1/target16:0:0/16:0:0:0/block/sr0
E: DEVTYPE=disk
E: DEVNAME=/dev/sr0
E: MAJOR=11
E: MINOR=0
E: SEQNUM=4400
E: SUBSYSTEM=block
P: /devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sda
N: sda
S: disk/by-id/wwn-0x5000c5004008ac0f
S: disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0
E: DEVNAME=/dev/sda1
E: DEVTYPE=partition
E: ID_ATA=1
E: SUBSYSTEM=block
E: MAJOR=8
E: UDEV_LOG=3""", '')
self.assertEqual(['/dev/dm-0'],
hu.get_vg_devices_from_udev_db())
@mock.patch.object(hu, 'get_block_devices_from_udev_db')
@mock.patch.object(hu, 'is_disk')
@mock.patch.object(hu, 'extrareport')
@ -484,3 +569,75 @@ E: UDEV_LOG=3""", '')
hu.is_block_device(filepath)
mock_os_stat.assert_called_once_with(filepath)
self.assertTrue(mock_isblk.called)
    @mock.patch.object(hu, 'udevreport')
    def test_get_device_ids(self, mock_udevreport):
        """get_device_ids() strips the '/dev/' prefix from DEVLINKS paths."""
        mock_udevreport.return_value = {
            'DEVLINKS': ['/dev/disk/by-label/label',
                         '/dev/disk/by-id/id',
                         '/dev/disk/by-path/path']}
        part = '/dev/sda'
        desired = {'name': '/dev/sda',
                   'paths': ['disk/by-label/label',
                             'disk/by-id/id',
                             'disk/by-path/path']}
        result = hu.get_device_ids(part)
        mock_udevreport.assert_called_once_with(part)
        self.assertDictEqual(result, desired)
    def test_is_valid_dev_type(self):
        """A VG-named device with any valid major is accepted when vg=True."""
        device_info = {
            'E: DEVNAME': '/dev/a',
            'E: MAJOR': 1,
            'E: DM_VG_NAME': 'group_name'
        }
        for mid in hu.VALID_MAJORS:
            device_info['E: MAJOR'] = mid
            self.assertTrue(hu._is_valid_dev_type(device_info, vg=True))
    def test_is_valid_dev_type_not_vg(self):
        """A device carrying DM_VG_NAME is rejected when vg=False."""
        device_info = {
            'E: DEVNAME': '/dev/a',
            'E: MAJOR': 1,
            'E: DM_VG_NAME': 'group_name'
        }
        for mid in hu.VALID_MAJORS:
            device_info['E: MAJOR'] = mid
            self.assertFalse(hu._is_valid_dev_type(device_info, vg=False))
    def test_is_valid_dev_type_wrong_major_test(self):
        """Every major number outside VALID_MAJORS is rejected."""
        device_info = {
            'E: DEVNAME': '/dev/a',
            'E: MAJOR': 1,
            'E: DM_VG_NAME': 'group_name'
        }
        for mid in set(range(0, 300)) - set(hu.VALID_MAJORS):
            device_info['E: MAJOR'] = mid
            self.assertFalse(hu._is_valid_dev_type(device_info, vg=True))
    def test_is_valid_dev_type_devname_test(self):
        """A DEVNAME containing 'loop' is rejected even with a valid major."""
        device_info = {
            'E: DEVNAME': '/dev/looptest',
            'E: MAJOR': 3,
            'E: DM_VG_NAME': 'group_name'
        }
        self.assertFalse(hu._is_valid_dev_type(device_info, vg=True))
    def test_is_valid_dev_type_vg_test(self):
        """A VG device with any valid major passes when vg=True."""
        device_info = {
            'E: DEVNAME': '/dev/a',
            'E: MAJOR': 1,
            'E: DM_VG_NAME': 'group_name'
        }
        for mid in hu.VALID_MAJORS:
            device_info['E: MAJOR'] = mid
            self.assertTrue(hu._is_valid_dev_type(device_info, vg=True))
    def test_is_valid_dev_type_not_vg_test(self):
        """A device without DM_VG_NAME is rejected when vg=True is required."""
        device_info = {
            'E: DEVNAME': '/dev/a',
            'E: MAJOR': 3,
        }
        self.assertFalse(hu._is_valid_dev_type(device_info, vg=True))

445
bareon/tests/test_ironic.py Normal file
View File

@ -0,0 +1,445 @@
#
# Copyright 2016 Cray Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest2
from bareon.drivers.data import ironic
# Sample Nailgun-style provisioning schema for the Swift deploy tests:
# three target disks addressed by three different id types (name, scsi
# address and by-path id), two LVM volume groups ('os', 'image'), and an
# extra disk 'sdd' whose volumes have size "0".
PROVISION_SAMPLE_DATA_SWIFT = {
    "partitions": [
        {
            "id": {
                "type": "name",
                "value": "sda"
            },
            "volumes": [
                {
                    "mount": "/boot",
                    "size": "200",
                    "type": "raid",
                    "file_system": "ext2",
                    "name": "Boot"
                },
                {
                    "mount": "/tmp",
                    "size": "200",
                    "type": "partition",
                    "file_system": "ext2",
                    "partition_guid": "fake_guid",
                    "name": "TMP"
                },
                {
                    "size": "19438",
                    "type": "pv",
                    "lvm_meta_size": "64",
                    "vg": "os"
                },
                {
                    "size": "45597",
                    "type": "pv",
                    "lvm_meta_size": "64",
                    "vg": "image"
                }
            ],
            "type": "disk",
            "size": "65587"
        },
        {
            "id": {
                "type": "scsi",
                "value": "1:0:0:0"
            },
            "volumes": [
                {
                    "mount": "/boot",
                    "size": "200",
                    "type": "raid",
                    "file_system": "ext2",
                    "name": "Boot"
                },
                {
                    "size": "0",
                    "type": "pv",
                    "lvm_meta_size": "10",
                    "vg": "os"
                },
                {
                    "size": "64971",
                    "type": "pv",
                    "lvm_meta_size": "64",
                    "vg": "image"
                }
            ],
            "type": "disk",
            "size": "65587"
        },
        {
            'id': {
                'type': 'path',
                'value': 'disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0'
            },
            "volumes": [
                {
                    "mount": "/boot",
                    "size": "200",
                    "type": "raid",
                    "file_system": "ext2",
                    "name": "Boot"
                },
                {
                    "size": "19374",
                    "type": "pv",
                    "lvm_meta_size": "10",
                    "vg": "os"
                },
                {
                    "size": "175347",
                    "type": "pv",
                    "lvm_meta_size": "64",
                    "vg": "image"
                }
            ],
            "type": "disk",
            "size": "195019"
        },
        {
            "_allocate_size": "min",
            "label": "Base System",
            "min_size": 19374,
            "volumes": [
                {
                    "mount": "/",
                    "size": "15360",
                    "type": "lv",
                    "name": "root",
                    "file_system": "ext4"
                },
                {
                    "mount": "swap",
                    "size": "4014",
                    "type": "lv",
                    "name": "swap",
                    "file_system": "swap"
                }
            ],
            "type": "vg",
            "id": "os"
        },
        {
            "_allocate_size": "all",
            "label": "Image Storage",
            "min_size": 5120,
            "volumes": [
                {
                    "mount": "/var/lib/glance",
                    "size": "175347",
                    "type": "lv",
                    "name": "glance",
                    "file_system": "xfs"
                }
            ],
            "type": "vg",
            "id": "image"
        },
        {
            "id": {
                "type": "name",
                "value": "sdd"
            },
            "volumes": [
                {
                    "mount": "/var",
                    "size": "0",
                    "type": "raid",
                    "file_system": "ext2",
                    "name": "Boot"
                },
                {
                    "mount": "/tmp",
                    "size": "0",
                    "type": "partition",
                    "file_system": "ext2",
                    "partition_guid": "fake_guid",
                    "name": "TMP"
                }
            ],
            "type": "disk",
            "size": "65587"
        }
    ]
}
# Sample rsync deployment input: ks_meta carries the rsync root path and
# the PXE kernel parameters.
PROVISION_SAMPLE_DATA_RSYNC = {
    'ks_meta': {
        'rsync_root_path': "10.10.10.1::testroot/path",
        "pm_data": {
            "kernel_params": "console=ttyS0,9600 console=tty0 rootdelay=90 "
                             "nomodeset", },
        'profile': ''
    }
}
# Fake hardware inventory matching PROVISION_SAMPLE_DATA_SWIFT: sda/sdb/sdd
# match by name or scsi address, sdc matches via its by-path symlink.
LIST_BLOCK_DEVICES_SAMPLE = [
    {'scsi': '0:0:0:0',
     'name': 'sda',
     'device': '/dev/sda',
     'path': [
         '/dev/disk/by-id/ata-VBOX_HARDDISK',
         '/dev/disk/by-id/scsi-SATA_VBOX_HARDDISK',
         '/dev/disk/by-id/wwn-fake_wwn_1']},
    {'scsi': '1:0:0:0',
     'name': 'sdb',
     'device': '/dev/sdb',
     'path': [
         '/dev/disk/by-id/ata-VBOX_HARDDISK_VBf2923215-708af674',
         '/dev/disk/by-id/scsi-SATA_VBOX_HARDDISK_VBf2923215-708af674',
         '/dev/disk/by-id/wwn-fake_wwn_2']},
    {'scsi': '2:0:0:0',
     'name': 'sdc',
     'device': '/dev/sdc',
     'path': [
         '/dev/disk/by-id/ata-VBOX_HARDDISK_VB50ee61eb-84e74fdf',
         '/dev/disk/by-id/scsi-SATA_VBOX_HARDDISK_VB50ee61eb-84e74fdf',
         '/dev/disk/by-id/wwn-fake_wwn_3',
         '/dev/disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0']},
    {'scsi': '3:0:0:0',
     'name': 'sdd',
     'device': '/dev/sdd',
     'path': [
         '/dev/disk/by-id/ata-VBOX_HARDDISK_VB50ee61eb-84fdf',
         '/dev/disk/by-id/scsi-SATA_VBOX_HARDDISK_VB50e74fdf',
         '/dev/disk/by-id/wwn-fake_wwn_3',
         '/dev/disk/by-path/pci-0000:00:0d.0-scsi3:0:0:0']},
]
class TestIronicMatch(unittest2.TestCase):
    """Ironic._match_device() against the scsi/path/name disk id types."""

    def __init__(self, *args, **kwargs):
        super(TestIronicMatch, self).__init__(*args, **kwargs)
        self.data_driver = ironic.Ironic('')

    def _match(self, id_type, id_value, hu_disk):
        # Build a kickstart-style disk for the given id and run the matcher.
        ks_disk = {'id': {'type': id_type, 'value': id_value}}
        return self.data_driver._match_device(hu_disk, ks_disk)

    def test_match_device_by_scsi_matches(self):
        # matches by scsi address
        self.assertTrue(
            self._match('scsi', '0:0:0:1', {'scsi': '0:0:0:1'}))

    def test_match_device_by_scsi_not_matches(self):
        # matches by scsi address
        self.assertFalse(
            self._match('scsi', '0:0:0:1', {'scsi': '5:0:0:1'}))

    def test_match_device_by_path_matches(self):
        hu_disk = {'path': [
            '/dev/disk/by-path/pci-0000:00:07.0-virtio-pci-virtio3',
            '/dev/disk/by-path/fake_path',
            '/dev/sdd',
        ]}
        self.assertTrue(self._match(
            'path', 'disk/by-path/pci-0000:00:07.0-virtio-pci-virtio3',
            hu_disk))

    def test_match_device_by_path_not_matches(self):
        hu_disk = {'path': [
            '/dev/disk/by-path/fake_path',
            '/dev/sdd',
        ]}
        self.assertFalse(self._match(
            'path', 'disk/by-path/pci-0000:00:07.0-virtio-pci-virtio3',
            hu_disk))

    def test_match_device_by_name_matches(self):
        self.assertTrue(
            self._match('name', 'sda', {'name': '/dev/sda'}))

    def test_match_device_by_name_not_matches(self):
        self.assertFalse(
            self._match('name', 'sda', {'name': '/dev/sdd'}))
@mock.patch('bareon.drivers.data.ironic.hu.scsi_address')
class TestNailgunMockedMeta(unittest2.TestCase):
    """partition_scheme built from PROVISION_SAMPLE_DATA_SWIFT."""

    def test_partition_scheme(self, mock_scsi_address):
        data_driver = ironic.Ironic(PROVISION_SAMPLE_DATA_SWIFT)
        # Fix: the original assigned the MagicMock *class* (missing
        # parentheses); stub with a MagicMock instance instead.
        data_driver.get_image_ids = mock.MagicMock()
        mock_devices = data_driver._get_block_devices = mock.MagicMock()
        mock_devices.return_value = LIST_BLOCK_DEVICES_SAMPLE
        p_scheme = data_driver.partition_scheme
        # 3 disks x /boot|/tmp rows collapse into 5 fss, 5 pvs, 3 lvs,
        # 2 vgs and 3 parted objects (per the sample schema above).
        self.assertEqual(5, len(p_scheme.fss))
        self.assertEqual(5, len(p_scheme.pvs))
        self.assertEqual(3, len(p_scheme.lvs))
        self.assertEqual(2, len(p_scheme.vgs))
        self.assertEqual(3, len(p_scheme.parteds))
@mock.patch('bareon.drivers.data.ironic.hu.get_block_devices_from_udev_db')
class TestGetBlockDevices(unittest2.TestCase):
    """Ironic._get_block_devices(): one info record per udev device."""

    def __init__(self, *args, **kwargs):
        super(TestGetBlockDevices, self).__init__(*args, **kwargs)
        self.driver = ironic.Ironic('')
        # Stub out per-device inspection.
        self.mock_devices = mock.MagicMock()
        self.driver._get_block_device_info = self.mock_devices

    def test_no_devices(self, udev_db_mock):
        # Empty udev db -> empty result, no per-device lookups.
        udev_db_mock.return_value = []
        self.assertEqual([], self.driver._get_block_devices())
        udev_db_mock.assert_called_once_with()
        self.assertEqual(0, self.mock_devices.call_count)

    def test_device_info(self, udev_db_mock):
        # Each udev entry is passed through _get_block_device_info.
        raw_entry = {'test': 'fake'}
        udev_db_mock.return_value = [raw_entry]
        self.mock_devices.return_value = 'test_value'
        self.assertEqual(['test_value'], self.driver._get_block_devices())
        udev_db_mock.assert_called_once_with()
        self.mock_devices.assert_called_once_with(raw_entry)
@mock.patch('bareon.drivers.data.ironic.hu.get_device_ids')
@mock.patch('bareon.drivers.data.ironic.hu.get_device_info')
@mock.patch('bareon.drivers.data.ironic.hu.scsi_address')
class TestGetBlockDevice(unittest2.TestCase):
    """Ironic._get_block_device_info(): assembled per-device record."""

    def test_no_device_info(self, scsi_mock, info_mock, ids_mock):
        # Without udev info, scsi address or ids, only the name survives.
        driver = ironic.Ironic('')
        scsi_mock.return_value = None
        info_mock.return_value = {}
        ids_mock.return_value = []
        self.assertEqual({'name': 'fake_device'},
                         driver._get_block_device_info('fake_device'))

    def test_device_info(self, scsi_mock, info_mock, ids_mock):
        # Full info: uspec, scsi address and id paths are all merged in.
        driver = ironic.Ironic('')
        devpath = ['test/devpath']
        uspec = {'DEVPATH': devpath}
        info_mock.return_value = {'uspec': uspec}
        scsi_mock.return_value = '1:0:0:0'
        ids_mock.return_value = devpath
        expected = {'path': devpath, 'name': 'fake_device',
                    'scsi': '1:0:0:0', 'uspec': uspec}
        self.assertEqual(expected,
                         driver._get_block_device_info('fake_device'))
        info_mock.assert_called_once_with('fake_device')
        scsi_mock.assert_called_once_with('fake_device')
class TestGetGrub(unittest2.TestCase):
    """grub.kernel_params assembly from deploy_data plus the boot cmdline."""

    @mock.patch('bareon.utils.utils.parse_kernel_cmdline')
    def test_kernel_params(self, cmdline_mock):
        # Only BOOTIF is carried over from the running kernel's cmdline.
        cmdline_mock.return_value = {
            "BOOTIF": "01-52-54-00-a5-55-58",
            "extrastuff": "test123"
        }
        provision_data = {
            'deploy_data': {'kernel_params': "test_param=test_val",
                            'other_data': 'test'},
            'partitions': 'fake_shema',
        }
        driver = ironic.Ironic(provision_data)
        self.assertEqual('test_param=test_val BOOTIF=01-52-54-00-a5-55-58',
                         driver.grub.kernel_params)

    def test_no_kernel_params(self):
        # Without deploy_data kernel_params the result is empty.
        provision_data = {
            'deploy_data': {'other_data': "test"},
            'partitions': 'fake_shema',
        }
        driver = ironic.Ironic(provision_data)
        self.assertEqual('', driver.grub.kernel_params)
class TestPartitionsPolicy(unittest2.TestCase):
    """partitions_policy: explicit value wins, 'verify' is the default."""

    def test_partitions_policy(self):
        driver = ironic.Ironic({'partitions_policy': "test_value",
                                'partitions': 'fake_shema'})
        self.assertEqual('test_value', driver.partitions_policy)

    def test_partitions_policy_default(self):
        driver = ironic.Ironic({'partitions': 'fake_shema'})
        self.assertEqual('verify', driver.partitions_policy)

View File

@ -0,0 +1,949 @@
#
# Copyright 2016 Cray Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import mock
import unittest2
from bareon.drivers.data import ironic
from bareon import errors
from bareon.utils import hardware as hu
from bareon.utils import utils
class TestGetImageSchema(unittest2.TestCase):
    """Ironic._get_image_scheme() builds Image objects from 'images' data."""

    # Fix: the previous no-op __init__ override (which only delegated to
    # super) was removed; TestCase.__init__ is inherited unchanged.

    def test_get_image_schema(self):
        image_uri = 'test_uri'
        rsync_flags = '-a -X'
        deploy_flags = {'rsync_flags': rsync_flags}
        data = {'images': [
            {
                'image_pull_url': image_uri,
                'target': '/',
                'name': 'test'
            }
        ], 'image_deploy_flags': deploy_flags}
        self.driver = ironic.Ironic(data)
        result = self.driver._get_image_scheme()
        # One image, carrying both the uri and the global deploy flags.
        self.assertEqual(len(result.images), 1)
        result_image = result.images[0]
        self.assertEqual(result_image.deployment_flags, deploy_flags)
        self.assertEqual(result_image.uri, image_uri)
class TestMatchDevice(unittest2.TestCase):
    """Ironic._match_device() with scalar and list-valued hu_disk entries."""

    def __init__(self, *args, **kwargs):
        super(TestMatchDevice, self).__init__(*args, **kwargs)
        self.driver = ironic.Ironic(None)

    def _run_match(self, hu_value):
        # The ks disk always asks for path == 'test_path'; only the
        # hardware-side value under test varies.
        ks_disk = {'id': {'type': 'path', 'value': 'test_path'}}
        return self.driver._match_device({'path': hu_value}, ks_disk)

    def test_match_list_value(self):
        self.assertTrue(self._run_match(['path1', 'test_path']))

    def test_not_match_list_value(self):
        self.assertFalse(self._run_match(['path1', 'path2']))

    def test_match_one_value(self):
        self.assertTrue(self._run_match('test_path'))

    def test_not_match_one_value(self):
        self.assertFalse(self._run_match('path1'))
class TestDiskDev(unittest2.TestCase):
    """Ironic._disk_dev(): exactly one hardware disk must match."""

    def __init__(self, *args, **kwargs):
        super(TestDiskDev, self).__init__(*args, **kwargs)
        self.driver = ironic.Ironic(None)
        self.driver._match_device = self.mock_match_device = mock.MagicMock()

    def test_no_valid_disks(self):
        # No candidate matches -> DiskNotFoundError.
        self.mock_match_device.side_effect = [False, False, False]
        self.driver._hu_disks = [{'name': 'disk1'},
                                 {'name': 'disk2'},
                                 {'name': 'disk3'}]
        missing = {'id': {'type': 'name', 'value': 'not_found'}}
        self.assertRaises(errors.DiskNotFoundError,
                          self.driver._disk_dev, missing)

    def test_more_than_one_valid_disk(self):
        # Two candidates match -> ambiguous, also DiskNotFoundError.
        self.mock_match_device.side_effect = [True, False, True]
        self.driver._hu_disks = [{'name': 'disk1', 'device': 'disk1'},
                                 {'name': 'disk2'},
                                 {'name': 'disk3', 'device': 'disk3'}]
        ambiguous = {'id': {'type': 'name', 'value': 'ks_disk'}}
        self.assertRaises(errors.DiskNotFoundError,
                          self.driver._disk_dev, ambiguous)

    def test_one_valid_disk(self):
        # Exactly one match -> that disk's name is returned.
        self.mock_match_device.side_effect = [True, False, False]
        self.driver._hu_disks = [{'name': 'disk1', 'device': 'ks_disk'},
                                 {'name': 'disk2'},
                                 {'name': 'disk3'}]
        self.assertEqual('disk1', self.driver._disk_dev(None))
class TestMatchPartition(unittest2.TestCase):
    """Ironic._match_data_by_pattern with exact and wildcard id values."""

    def __init__(self, *args, **kwargs):
        super(TestMatchPartition, self).__init__(*args, **kwargs)
        self.driver = ironic.Ironic(None)

    def _match(self, ks_value, hu_value):
        # Compare a kickstart partition id (possibly a glob pattern)
        # against the value(s) the hardware partition reports for 'path'.
        ks_partition = {'id': {'type': 'path', 'value': ks_value}}
        hu_partition = {'path': hu_value}
        return self.driver._match_data_by_pattern(hu_partition, ks_partition)

    def test_match_list_value(self):
        self.assertTrue(self._match('test_path', ['path1', 'test_path']))

    def test_match_list_value_wildcard(self):
        # A wildcard pattern matches one entry of the list.
        self.assertTrue(self._match('test_*', ['path1', 'test_path']))

    def test_not_match_list_value(self):
        self.assertFalse(self._match('test_path', ['path1', 'path2']))

    def test_not_match_list_value_wildcard(self):
        # The wildcard matches none of the list entries.
        self.assertFalse(self._match('test_*', ['path1', 'path2']))

    def test_match_one_value(self):
        self.assertTrue(self._match('test_path', 'test_path'))

    def test_match_one_value_wildcard(self):
        self.assertTrue(self._match('test_*', 'test_path'))

    def test_not_match_one_value(self):
        self.assertFalse(self._match('test_path', 'path1'))

    def test_not_match_one_wildcard(self):
        self.assertFalse(self._match('test_*', 'path1'))
class TestDiskPartition(unittest2.TestCase):
    """Ironic._disk_partition must resolve exactly one matching partition."""

    def __init__(self, *args, **kwargs):
        super(TestDiskPartition, self).__init__(*args, **kwargs)
        self.driver = ironic.Ironic(None)
        self.driver._match_data_by_pattern = \
            self.mock_match_part = mock.MagicMock()

    def test_no_valid_disks(self):
        # Nothing matches -> the lookup must raise instead of guessing.
        self.mock_match_part.side_effect = [False, False, False]
        self.driver._hu_partitions = [{'name': 'disk1'},
                                      {'name': 'disk2'},
                                      {'name': 'disk3'}]
        ks_disk = {'id': {'type': 'name', 'value': 'ks_disk'}}
        self.assertRaises(errors.DiskNotFoundError,
                          self.driver._disk_partition, ks_disk)

    def test_more_than_one_valid_disk(self):
        # Multiple hits -> ambiguity is treated as "not found" too.
        self.mock_match_part.side_effect = [True, False, True]
        self.driver._hu_partitions = [{'name': 'disk1', 'device': 'disk1'},
                                      {'name': 'disk2'},
                                      {'name': 'disk3', 'device': 'disk3'}]
        ks_disk = {'id': {'type': 'name', 'value': 'ks_disk'}}
        self.assertRaises(errors.DiskNotFoundError,
                          self.driver._disk_partition, ks_disk)

    def test_one_valid_disk(self):
        # Exactly one hit -> its name is returned.
        self.mock_match_part.side_effect = [True, False, False]
        self.driver._hu_partitions = [{'name': 'ks_disk'},
                                      {'name': 'disk2'},
                                      {'name': 'disk3'}]
        self.assertEqual(self.driver._disk_partition(None), 'ks_disk')
@mock.patch('bareon.utils.hardware.get_partitions_from_udev_db')
@mock.patch('bareon.utils.hardware.get_device_ids')
class TestGetPartitionIds(unittest2.TestCase):
    """Ironic._get_device_ids: collecting partition ids via the udev db."""

    def __init__(self, *args, **kwargs):
        super(TestGetPartitionIds, self).__init__(*args, **kwargs)
        self.driver = ironic.Ironic(None)

    def test_no_devices(self, mock_ids, mock_partitions):
        # Empty udev db: nothing to query, get_device_ids never called.
        mock_partitions.return_value = []
        self.assertEqual(
            self.driver._get_device_ids(dev_type=hu.PARTITION), [])
        self.assertFalse(mock_ids.called)

    def test_no_ids_on_devices(self, mock_ids, mock_partitions):
        # Partitions exist but expose no ids: every device is queried,
        # the result stays empty.
        mock_partitions.return_value = devices = ['/dev/sda1', '/dev/sda2']
        mock_ids.return_value = []
        self.assertEqual(
            self.driver._get_device_ids(dev_type=hu.PARTITION), [])
        mock_ids.assert_has_calls([mock.call(dev) for dev in devices])

    def test_success(self, mock_ids, mock_partitions):
        # One id dict per partition, returned in udev order.
        mock_partitions.return_value = devices = ['/dev/sda1', '/dev/sda2']
        mock_ids.side_effect = expected = [
            {'name': '/dev/sda1'},
            {'name': '/dev/sda2'}
        ]
        self.assertEqual(
            self.driver._get_device_ids(dev_type=hu.PARTITION), expected)
        mock_ids.assert_has_calls([mock.call(dev) for dev in devices])
class TestFindHwFstab(unittest2.TestCase):
    """Ironic._find_hw_fstab: reading /etc/fstab from on-disk root fss.

    The execute() mock is fed three results per root ('/') filesystem —
    presumably mount, read-fstab, umount, with only the middle call's
    stdout collected.  TODO(review): confirm the exact command sequence
    against the driver implementation.
    """

    @mock.patch.object(utils, 'execute')
    def test__find_hw_fstab_success_single_disk(self, exec_mock):
        fs = namedtuple('fs', 'mount type device os_id')
        # One OS (os_id '1'); only the '/' entry triggers an fstab read.
        fss = [fs(mount='/', type='ext4', device='/dev/sda', os_id='1'),
               fs(mount='/usr', type='ext4', device='/dev/sdb', os_id='1')]
        data_driver = ironic.Ironic(None)
        data_driver._partition_scheme = ironic.objects.PartitionScheme()
        data_driver.partition_scheme.fss = fss
        exec_mock.side_effect = [('stdout', 'stderr'),
                                 ('fstab_1', 'stderr'),
                                 ('stdout', 'stderr')]
        res = data_driver._find_hw_fstab()
        self.assertEqual('\n'.join(('fstab_1',)), res)

    @mock.patch.object(utils, 'execute')
    def test__find_hw_fstab_success_two_disk(self, exec_mock):
        fs = namedtuple('fs', 'mount type device os_id')
        # Two OSes ('1' and '2'), each with a '/' mount: two fstab reads,
        # joined with a newline in os order.
        fss = [fs(mount='/', type='ext4', device='/dev/sda', os_id='1'),
               fs(mount='/usr', type='ext4', device='/dev/sdb', os_id='1'),
               fs(mount='/', type='ext4', device='/dev/sda', os_id='2')]
        data_driver = ironic.Ironic(None)
        data_driver._partition_scheme = ironic.objects.PartitionScheme()
        data_driver.partition_scheme.fss = fss
        exec_mock.side_effect = [('stdout', 'stderr'),
                                 ('fstab_1', 'stderr'),
                                 ('stdout', 'stderr'),
                                 ('stdout', 'stderr'),
                                 ('fstab_2', 'stderr'),
                                 ('stdout', 'stderr')]
        res = data_driver._find_hw_fstab()
        self.assertEqual('\n'.join(('fstab_1', 'fstab_2')), res)

    @mock.patch.object(utils, 'execute')
    def test__find_hw_fstab_fail_error_while_reading_fstba(self, exec_mock):
        fs = namedtuple('fs', 'mount type device os_id')
        fss = [fs(mount='/etc', type='ext4', device='/dev/sda', os_id='1'),
               fs(mount='/', type='ext4', device='/dev/sda', os_id='1')]
        data_driver = ironic.Ironic(None)
        data_driver._partition_scheme = ironic.objects.PartitionScheme()
        data_driver.partition_scheme.fss = fss
        # The second execute (the fstab read itself) fails; the driver
        # must surface it as HardwarePartitionSchemeCannotBeReadError.
        exec_mock.side_effect = [('stdout', 'stderr'),
                                 errors.ProcessExecutionError,
                                 ('stdout', 'stderr')]
        self.assertRaises(errors.HardwarePartitionSchemeCannotBeReadError,
                          data_driver._find_hw_fstab)
class TestConvertStringSize(unittest2.TestCase):
    """ironic.convert_string_sizes must funnel every absolute size string
    through human2bytes, while skipping relative sizes ('NN%' and
    'remaining'), which are resolved later by _resolve_all_sizes."""

    @mock.patch.object(ironic, 'human2bytes')
    def test_success_single_disk(self, mock_converter):
        data = {'image_deploy_flags': {'rsync_flags': '-a -A -X'},
                'partitions': [{'extra': [],
                                'id': {'type': 'name', 'value': 'vda'},
                                'size': '10000 MB',
                                'type': 'disk',
                                'volumes': [{'file_system': 'ext4',
                                             'mount': '/',
                                             'size': '5 GB',
                                             'type': 'partition'},
                                            {'file_system': 'ext4',
                                             'mount': '/var',
                                             'size': '4000',
                                             'type': 'partition'}]}]}
        ironic.convert_string_sizes(data)
        # The disk size and both volume sizes are converted.
        mock_converter.assert_has_calls(
            [mock.call('10000 MB'), mock.call('5 GB'), mock.call('4000')],
            any_order=True)

    @mock.patch.object(ironic, 'human2bytes')
    def test_success_two_disks(self, mock_converter):
        data = {'image_deploy_flags': {'rsync_flags': '-a -A -X'},
                'partitions': [{'extra': [],
                                'id': {'type': 'name', 'value': 'vda'},
                                'size': '10000 MB',
                                'type': 'disk',
                                'volumes': [{'file_system': 'ext4',
                                             'mount': '/',
                                             'size': '5 GB',
                                             'type': 'partition'},
                                            {'file_system': 'ext4',
                                             'mount': '/var',
                                             'size': '4000',
                                             'type': 'partition'}]},
                               {'extra': [],
                                'id': {'type': 'name', 'value': 'vdb'},
                                'size': '2000 MB',
                                'type': 'disk',
                                'volumes': [{'file_system': 'ext4',
                                             'mount': '/usr',
                                             'size': '2 GB',
                                             'type': 'partition'}]}]}
        ironic.convert_string_sizes(data)
        # Sizes from both disks are converted.
        mock_converter.assert_has_calls(
            [mock.call('10000 MB'), mock.call('5 GB'), mock.call('4000'),
             mock.call('2000 MB'), mock.call('2 GB')], any_order=True)

    @mock.patch.object(ironic, 'human2bytes')
    def test_success_lvm_meta_size(self, mock_converter):
        data = {'image_deploy_flags': {'rsync_flags': '-a -A -X'},
                'partitions': [{'extra': [],
                                'id': {'type': 'name', 'value': 'vda'},
                                'size': '10000 MB',
                                'type': 'disk',
                                'volumes': [{'file_system': 'ext4',
                                             'mount': '/',
                                             'size': '5 GB',
                                             'type': 'partition'},
                                            {"size": "4 GB",
                                             "type": "pv",
                                             "lvm_meta_size": "64",
                                             "vg": "os"
                                             }]}]}
        ironic.convert_string_sizes(data)
        # 'lvm_meta_size' is a size-bearing key too and must be converted.
        mock_converter.assert_has_calls(
            [mock.call('10000 MB'), mock.call('5 GB'), mock.call('4 GB'),
             mock.call('64')], any_order=True)

    @mock.patch.object(ironic, 'human2bytes')
    def test_success_ignore_percent(self, mock_converter):
        data = {'image_deploy_flags': {'rsync_flags': '-a -A -X'},
                'partitions': [{'extra': [],
                                'id': {'type': 'name', 'value': 'vda'},
                                'size': '10000 MB',
                                'type': 'disk',
                                'volumes': [{'file_system': 'ext4',
                                             'mount': '/',
                                             'size': '50%',
                                             'type': 'partition'},
                                            {'file_system': 'ext4',
                                             'mount': '/var',
                                             'size': '4000',
                                             'type': 'partition'}]}]}
        ironic.convert_string_sizes(data)
        # '50%' is a relative size: it must NOT go through human2bytes.
        mock_converter.assert_has_calls(
            [mock.call('10000 MB'), mock.call('4000')],
            any_order=True)

    @mock.patch.object(ironic, 'human2bytes')
    def test_success_ignore_remaining(self, mock_converter):
        data = {'image_deploy_flags': {'rsync_flags': '-a -A -X'},
                'partitions': [{'extra': [],
                                'id': {'type': 'name', 'value': 'vda'},
                                'size': '10000 MB',
                                'type': 'disk',
                                'volumes': [{'file_system': 'ext4',
                                             'mount': '/',
                                             'size': 'remaining',
                                             'type': 'partition'},
                                            {'file_system': 'ext4',
                                             'mount': '/var',
                                             'size': '4000',
                                             'type': 'partition'}]}]}
        ironic.convert_string_sizes(data)
        # 'remaining' is also relative and is left untouched.
        mock_converter.assert_has_calls(
            [mock.call('10000 MB'), mock.call('4000')],
            any_order=True)
class TestHumantoBytesConverter(unittest2.TestCase):
    """Unit handling in ironic.human2bytes."""

    def test_default_convertion(self):
        # A bare number is interpreted using the supplied default unit.
        self.assertEqual(ironic.human2bytes('1000', default='GiB'), 1024000)

    def test_target_convertion(self):
        # An explicit unit is converted into the requested target unit.
        self.assertEqual(ironic.human2bytes('1024 MiB', target='GiB'), 1)

    def test_invalid_data(self):
        # Unparseable input must raise ValueError.
        self.assertRaises(ValueError, ironic.human2bytes, 'invalid data')
class TestConvertPercentSizes(unittest2.TestCase):
    """ironic._resolve_all_sizes: resolution of '%' and 'remaining' sizes.

    BUG FIX: the original assertions were written as
    ``map(lambda r, d: self.assertDictEqual(r, d), result, desired)``.
    Under Python 3 ``map`` returns a lazy iterator that is never consumed,
    so those assertions never executed and the tests passed vacuously.
    They are replaced by _assert_resolved(), which really compares the
    resolved scheme against the expected one (and also checks lengths).
    """

    GRUB = ironic.DEFAULT_GRUB_SIZE
    LVM = ironic.DEFAULT_LVM_META_SIZE

    def _assert_resolved(self, result, desired):
        """Eagerly compare two lists of scheme dicts element-wise."""
        self.assertEqual(len(result), len(desired))
        for actual, expected in zip(result, desired):
            self.assertDictEqual(actual, expected)

    def test_single_disk_no_percent(self):
        # Absolute sizes pass through unchanged.
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000,
             'type': 'disk', 'volumes': [{'size': 5000, 'type': 'partition'},
                                         {'size': 4900,
                                          'type': 'partition'}]}]
        desired = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000,
             'type': 'disk', 'volumes': [{'size': 5000, 'type': 'partition'},
                                         {'size': 4900,
                                          'type': 'partition'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_percent(self):
        # '50%' of a 10000 disk resolves to 5000.
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000,
             'type': 'disk', 'volumes': [{'size': '50%', 'type': 'partition'},
                                         {'size': 4900,
                                          'type': 'partition'}]}]
        desired = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000,
             'type': 'disk', 'volumes': [{'size': 5000, 'type': 'partition'},
                                         {'size': 4900,
                                          'type': 'partition'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_percent_unicode(self):
        # Same as above but with a unicode size string.
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000,
             'type': 'disk', 'volumes': [{'size': u'50%', 'type': 'partition'},
                                         {'size': 4900,
                                          'type': 'partition'}]}]
        desired = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000,
             'type': 'disk', 'volumes': [{'size': 5000, 'type': 'partition'},
                                         {'size': 4900,
                                          'type': 'partition'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_without_size(self):
        # A disk with a percent volume but no 'size' cannot be resolved.
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'},
             'type': 'disk', 'volumes': [{'size': '50%', 'type': 'partition'},
                                         {'size': 4900,
                                          'type': 'partition'}]}]
        self.assertRaises(ValueError, ironic._resolve_all_sizes, start_data)

    def test_single_disk_insufficient_size(self):
        # 50% + 6000 exceeds the 10000 disk.
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000,
             'type': 'disk', 'volumes': [{'size': '50%', 'type': 'partition'},
                                         {'size': 6000,
                                          'type': 'partition'}]}]
        self.assertRaises(ValueError, ironic._resolve_all_sizes, start_data)

    def test_single_disk_with_vg(self):
        # A 100% lv gets the pv size minus the default LVM metadata.
        start_data = [{'id': {'type': 'name', 'value': 'vda'},
                       'size': 10000,
                       'type': 'disk',
                       'volumes': [{'size': '50%', 'type': 'partition'},
                                   {'size': '49%', 'type': 'pv',
                                    'vg': 'home'}]},
                      {'id': 'home',
                       'type': 'vg',
                       'volumes': [{'file_system': 'ext3',
                                    'mount': '/home',
                                    'name': 'home',
                                    'size': "100%",
                                    'type': 'lv'}]}]
        desired = [{'id': {'type': 'name', 'value': 'vda'},
                    'size': 10000,
                    'type': 'disk',
                    'volumes': [{'size': 5000, 'type': 'partition'},
                                {'size': 4900, 'type': 'pv',
                                 'vg': 'home'}]},
                   {'id': 'home',
                    'type': 'vg',
                    'volumes': [{'file_system': 'ext3',
                                 'mount': '/home',
                                 'name': 'home',
                                 'size': 4900 - self.LVM,
                                 'type': 'lv'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_with_vg_insufficient_size(self):
        # Two 60% lvs overflow the volume group.
        start_data = [{'id': {'type': 'name', 'value': 'vda'},
                       'size': 10000,
                       'type': 'disk',
                       'volumes': [{'size': '50%', 'type': 'partition'},
                                   {'size': '49%', 'type': 'pv',
                                    'vg': 'home'}]},
                      {'id': 'home',
                       'type': 'vg',
                       'volumes': [{'file_system': 'ext3',
                                    'mount': '/home',
                                    'name': 'home',
                                    'size': "60%",
                                    'type': 'lv'},
                                   {'file_system': 'ext3',
                                    'mount': '/media',
                                    'name': 'media',
                                    'size': "60%",
                                    'type': 'lv'}]}]
        self.assertRaises(ValueError, ironic._resolve_all_sizes, start_data)

    def test_single_disk_with_vg_size_more_than_100_percent(self):
        # A single lv may not claim more than 100% of its vg.
        start_data = [{'id': {'type': 'name', 'value': 'vda'},
                       'size': 10000,
                       'type': 'disk',
                       'volumes': [{'size': '50%', 'type': 'partition'},
                                   {'size': '49%', 'type': 'pv',
                                    'vg': 'home'}]},
                      {'id': 'home',
                       'type': 'vg',
                       'volumes': [{'file_system': 'ext3',
                                    'mount': '/home',
                                    'name': 'home',
                                    'size': "101%",
                                    'type': 'lv'}]}]
        self.assertRaises(ValueError, ironic._resolve_all_sizes, start_data)

    def test_single_disk_with_vg_lvm_meta_size(self):
        # An explicit lvm_meta_size overrides the default metadata size.
        start_data = [{'id': {'type': 'name', 'value': 'vda'},
                       'size': 10000,
                       'type': 'disk',
                       'volumes': [{'size': '50%', 'type': 'partition'},
                                   {'size': '49%', 'type': 'pv',
                                    'vg': 'home',
                                    'lvm_meta_size': 49}]},
                      {'id': 'home',
                       'type': 'vg',
                       'volumes': [{'file_system': 'ext3',
                                    'mount': '/home',
                                    'name': 'home',
                                    'size': "100%",
                                    'type': 'lv'}]}]
        desired = [{'id': {'type': 'name', 'value': 'vda'},
                    'size': 10000,
                    'type': 'disk',
                    'volumes': [{'size': 5000, 'type': 'partition'},
                                {'size': 4900, 'type': 'pv',
                                 'vg': 'home',
                                 'lvm_meta_size': 49}]},
                   {'id': 'home',
                    'type': 'vg',
                    'volumes': [{'file_system': 'ext3',
                                 'mount': '/home',
                                 'name': 'home',
                                 'size': 4900 - 49,
                                 'type': 'lv'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_remaining(self):
        # 'remaining' takes what is left after GRUB is accounted for.
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000,
             'type': 'disk',
             'volumes': [{'size': '50%', 'type': 'partition', 'mount': '/'},
                         {'size': 'remaining', 'type': 'partition',
                          'mount': '/home'}]}]
        desired = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000,
             'type': 'disk',
             'volumes': [{'size': 5000, 'type': 'partition', 'mount': '/'},
                         {'size': 5000 - self.GRUB, 'type': 'partition',
                          'mount': '/home'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_remaining_nothing_left(self):
        # The fixed volume already consumes all usable space.
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000,
             'type': 'disk',
             'volumes': [{'size': 10000 - self.GRUB, 'type': 'partition',
                          'mount': '/'},
                         {'size': 'remaining', 'type': 'partition',
                          'mount': '/home'}]}]
        self.assertRaises(ValueError, ironic._resolve_all_sizes, start_data)

    def test_single_disk_remaining_insufficient_size(self):
        # A fixed volume larger than the disk leaves nothing 'remaining'.
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000,
             'type': 'disk',
             'volumes': [{'size': 'remaining', 'type': 'partition',
                          'mount': '/'},
                         {'size': 11000, 'type': 'partition',
                          'mount': '/home'}]}]
        self.assertRaises(ValueError, ironic._resolve_all_sizes, start_data)

    def test_single_disk_with_lv_remaining(self):
        # 'remaining' inside a vg is resolved after the percent lvs.
        start_data = [{'id': {'type': 'name', 'value': 'vda'},
                       'size': 10000,
                       'type': 'disk',
                       'volumes': [{'mount': '/',
                                    'size': '50%',
                                    'type': 'partition'},
                                   {'size': '49%',
                                    'type': 'pv',
                                    'vg': 'home'}]},
                      {'id': 'home',
                       'type': 'vg',
                       'volumes': [{'mount': '/var',
                                    'size': 'remaining',
                                    'type': 'lv'},
                                   {'mount': '/home',
                                    'size': '30%',
                                    'type': 'lv'}]}]
        desired = [{'id': {'type': 'name', 'value': 'vda'},
                    'size': 10000,
                    'type': 'disk',
                    'volumes': [{'mount': '/',
                                 'size': 5000,
                                 'type': 'partition'},
                                {'size': 4900,
                                 'type': 'pv',
                                 'vg': 'home'}]},
                   {'id': 'home',
                    'type': 'vg',
                    'volumes': [{'mount': '/var',
                                 'size': 4836 - (int(0.3 * 4836)),
                                 'type': 'lv'},
                                {'mount': '/home',
                                 'size': int(0.3 * 4836),
                                 'type': 'lv'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_with_pv_and_lv_remaining(self):
        # 'remaining' pv feeds a vg that itself contains a 'remaining' lv.
        disk_size = 10000
        start_data = [{'id': {'type': 'name', 'value': 'vda'},
                       'size': disk_size,
                       'type': 'disk',
                       'volumes': [{'mount': '/',
                                    'size': '50%',
                                    'type': 'partition'},
                                   {'size': 'remaining',
                                    'type': 'pv',
                                    'vg': 'home'}]},
                      {'id': 'home',
                       'type': 'vg',
                       'volumes': [{'mount': '/var',
                                    'size': 'remaining',
                                    'type': 'lv'},
                                   {'mount': '/home',
                                    'size': '30%',
                                    'type': 'lv'}]}]
        expected_partition_size = disk_size * 0.50
        expected_home_pv_size = (disk_size - expected_partition_size -
                                 ironic.DEFAULT_GRUB_SIZE)
        expected_home_lv_size = int((expected_home_pv_size -
                                     ironic.DEFAULT_LVM_META_SIZE) * 0.3)
        expected_var_lv_size = (expected_home_pv_size - expected_home_lv_size -
                                ironic.DEFAULT_LVM_META_SIZE)
        desired = [{'id': {'type': 'name', 'value': 'vda'},
                    'size': disk_size,
                    'type': 'disk',
                    'volumes': [{'mount': '/',
                                 'size': expected_partition_size,
                                 'type': 'partition'},
                                {'size': expected_home_pv_size,
                                 'type': 'pv',
                                 'vg': 'home'}]},
                   {'id': 'home',
                    'type': 'vg',
                    'volumes': [{'mount': '/var',
                                 'size': expected_var_lv_size,
                                 'type': 'lv'},
                                {'mount': '/home',
                                 'size': expected_home_lv_size,
                                 'type': 'lv'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_multiple_remaining(self):
        # At most one 'remaining' volume is allowed per disk.
        start_data = [
            {'id': {'type': 'name', 'value': 'vda'}, 'size': 10000,
             'type': 'disk',
             'volumes': [{'size': 'remaining', 'type': 'partition',
                          'mount': '/'},
                         {'size': 'remaining', 'type': 'partition',
                          'mount': '/home'}]}]
        self.assertRaises(ValueError, ironic._resolve_all_sizes, start_data)

    def test_single_disk_with_vg_reverse_order(self):
        # Resolution must not depend on vg-before-disk ordering.
        start_data = [{'id': 'home',
                       'type': 'vg',
                       'volumes': [{'file_system': 'ext3',
                                    'mount': '/home',
                                    'name': 'home',
                                    'size': "100%",
                                    'type': 'lv'}]},
                      {'id': {'type': 'name', 'value': 'vda'},
                       'size': 10000,
                       'type': 'disk',
                       'volumes': [{'size': '50%', 'type': 'partition'},
                                   {'size': '49%', 'type': 'pv',
                                    'vg': 'home'}]}]
        desired = [{'id': {'type': 'name', 'value': 'vda'},
                    'size': 10000,
                    'type': 'disk',
                    'volumes': [{'size': 5000, 'type': 'partition'},
                                {'size': 4900, 'type': 'pv',
                                 'vg': 'home'}]},
                   {'id': 'home',
                    'type': 'vg',
                    'volumes': [{'file_system': 'ext3',
                                 'mount': '/home',
                                 'name': 'home',
                                 'size': 4900 - self.LVM,
                                 'type': 'lv'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)

    def test_single_disk_with_vg_multiple_pv(self):
        # An lv percent is taken from the pooled size of all its pvs.
        start_data = [{'id': {'type': 'name', 'value': 'vda'},
                       'size': 10000,
                       'type': 'disk',
                       'volumes': [
                           {'size': 7000, 'type': 'pv', 'vg': 'home'}]},
                      {'id': {'type': 'name', 'value': 'vdb'},
                       'size': 5000,
                       'type': 'disk',
                       'volumes': [
                           {'size': 4000, 'type': 'pv', 'vg': 'home'}]},
                      {'id': 'home',
                       'type': 'vg',
                       'volumes': [{'file_system': 'ext3',
                                    'mount': '/home',
                                    'name': 'home',
                                    'size': '50%',
                                    'type': 'lv'}]}]
        desired = [{'id': {'type': 'name', 'value': 'vda'},
                    'size': 10000,
                    'type': 'disk',
                    'volumes': [{'size': 7000, 'type': 'pv', 'vg': 'home'}]},
                   {'id': {'type': 'name', 'value': 'vdb'},
                    'size': 5000,
                    'type': 'disk',
                    'volumes': [{'size': 4000, 'type': 'pv', 'vg': 'home'}]},
                   {'id': 'home',
                    'type': 'vg',
                    'volumes': [{'file_system': 'ext3',
                                 'mount': '/home',
                                 'name': 'home',
                                 'size': 5500 - self.LVM,
                                 'type': 'lv'}]}]
        result = ironic._resolve_all_sizes(start_data)
        self._assert_resolved(result, desired)
class TestProcessPartition(unittest2.TestCase):
    """Ironic._process_partition: per-volume partition and fs creation."""

    def __init__(self, *args, **kwargs):
        super(TestProcessPartition, self).__init__(*args, **kwargs)
        self.driver = ironic.Ironic(None)
        # Stub out the collaborators _process_partition touches.
        self.driver._partition_data = self.mock_part_data = mock.MagicMock()
        self.driver._add_partition = self.mock_add_part = mock.MagicMock()
        self.mock_add_part.return_value = self.mock_part = mock.MagicMock()
        self.driver.get_os_ids = self.mock_get_os_ids = mock.MagicMock()
        self.driver.get_image_ids = self.mock_get_image_ids = mock.MagicMock()

    def test_with_partition_guid(self):
        # A 'partition_guid' key is forwarded to the created partition.
        mock_volume = {'partition_guid': 'test_guid'}
        self.driver._process_partition(mock_volume, None, None, None)
        self.mock_part.set_guid.assert_called_once_with('test_guid')

    def test_no_mount_option(self):
        # NOTE(review): mock_part_schema is created locally and never
        # passed to _process_partition, so the call_count assertion below
        # is vacuous — it can never fail.  Consider passing it as the
        # part_schema argument and asserting on a real method of it.
        mock_volume = {}
        mock_part_schema = mock.MagicMock()
        self.driver._process_partition(mock_volume, None, None, None)
        self.assertEqual(mock_part_schema.call_count, 0)

    def test_none_mount_option(self):
        # NOTE(review): same vacuous assertion as test_no_mount_option.
        mock_volume = {'mount': 'none'}
        mock_part_schema = mock.MagicMock()
        self.driver._process_partition(mock_volume, None, None, None)
        self.assertEqual(mock_part_schema.call_count, 0)

    def test_non_boot_volume_non_default(self):
        # Explicit fs/fstab settings propagate into the created fs entry.
        mock_volume = {'mount': '/', 'file_system': 'ext4',
                       'fstab_options': 'noatime', 'fstab_enabled': False,
                       'disk_label': 'test_label'}
        part_schema = ironic.objects.PartitionScheme()
        parted = part_schema.add_parted(name='test_parted', label='gpt')
        self.driver._process_partition(mock_volume, None, parted,
                                       part_schema)
        self.assertEqual(len(part_schema.fss), 1)
        fs = part_schema.fss[0]
        self.assertEqual(fs.type, 'ext4')
        # 'disk_label' is rendered as a ready-to-use mkfs label argument.
        self.assertEqual(fs.label, ' -L test_label ')
        self.assertEqual(fs.fstab_options, 'noatime')
        self.assertEqual(fs.fstab_enabled, False)
        self.assertEqual(fs.mount, '/')
        self.assertFalse(self.driver._boot_done)

    def test_non_boot_volume_default(self):
        # Defaults: xfs, empty label, 'defaults' options, fstab enabled.
        mock_volume = {'mount': '/'}
        part_schema = ironic.objects.PartitionScheme()
        parted = part_schema.add_parted(name='test_parted', label='gpt')
        self.driver._process_partition(mock_volume, None, parted,
                                       part_schema)
        self.assertEqual(len(part_schema.fss), 1)
        fs = part_schema.fss[0]
        self.assertEqual(fs.type, 'xfs')
        self.assertEqual(fs.label, '')
        self.assertEqual(fs.fstab_options, 'defaults')
        self.assertEqual(fs.fstab_enabled, True)
        self.assertEqual(fs.mount, '/')
        self.assertFalse(self.driver._boot_done)

    def test_already_boot_volume(self):
        # A second /boot volume leaves the _boot_done flag set.
        mock_volume = {'mount': '/boot'}
        self.driver._boot_done = True
        self.driver._process_partition(mock_volume, None, mock.MagicMock(),
                                       mock.MagicMock())
        self.assertTrue(self.driver._boot_done)

    def test_boot_volume(self):
        # The first /boot volume flips _boot_done to True.
        mock_volume = {'mount': '/boot'}
        self.driver._process_partition(mock_volume, None, mock.MagicMock(),
                                       mock.MagicMock())
        self.assertTrue(self.driver._boot_done)

View File

@ -0,0 +1,242 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest2
from bareon.drivers.data import ks_spaces_validator as kssv
from bareon import errors
# Sample kickstart partitioning scheme used by the validator tests below:
# three disks (sda and sdb addressed by name, sdc by udev path) feeding
# two LVM volume groups ('os' and 'image').  Sizes are strings, as they
# arrive from the provision data before unit conversion.
SAMPLE_SCHEME = [
    {
        "id": {
            "type": "name",
            "value": "sda"
        },
        "name": "sda",
        "volumes": [
            {
                "type": "boot",
                "size": "300"
            },
            {
                "mount": "/boot",
                "size": "200",
                "type": "raid",
                "file_system": "ext2",
                "name": "Boot"
            },
            {
                "type": "lvm_meta_pool",
                "size": "0"
            },
            {
                "size": "19438",
                "type": "pv",
                "lvm_meta_size": "64",
                "vg": "os"
            },
            {
                "size": "45597",
                "type": "pv",
                "lvm_meta_size": "64",
                "vg": "image"
            }
        ],
        "type": "disk",
        "size": "65535"
    },
    {
        "id": {
            "type": "name",
            "value": "sdb"
        },
        "name": "sdb",
        "volumes": [
            {
                "type": "boot",
                "size": "300"
            },
            {
                "mount": "/boot",
                "size": "200",
                "type": "raid",
                "file_system": "ext2",
                "name": "Boot"
            },
            {
                "type": "lvm_meta_pool",
                "size": "64"
            },
            {
                "size": "0",
                "type": "pv",
                "lvm_meta_size": "0",
                "vg": "os"
            },
            {
                "size": "64971",
                "type": "pv",
                "lvm_meta_size": "64",
                "vg": "image"
            }
        ],
        "type": "disk",
        "size": "65535"
    },
    {
        "name": "sdc",
        "volumes": [
            {
                "type": "boot",
                "size": "300"
            },
            {
                "mount": "/boot",
                "size": "200",
                "type": "raid",
                "file_system": "ext2",
                "name": "Boot"
            },
            {
                "type": "lvm_meta_pool",
                "size": "64"
            },
            {
                "size": "0",
                "type": "pv",
                "lvm_meta_size": "0",
                "vg": "os"
            },
            {
                "size": "64971",
                "type": "pv",
                "lvm_meta_size": "64",
                "vg": "image"
            }
        ],
        "type": "disk",
        # This disk is identified by a stable udev by-path link rather
        # than a kernel device name.
        "id": {
            "type": "path",
            "value": "disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0"
        },
        "size": "65535"
    },
    {
        "_allocate_size": "min",
        "label": "Base System",
        "min_size": 19374,
        "volumes": [
            {
                "mount": "/",
                "size": "15360",
                "type": "lv",
                "name": "root",
                "file_system": "ext4"
            },
            {
                "mount": "swap",
                "size": "4014",
                "type": "lv",
                "name": "swap",
                "file_system": "swap"
            }
        ],
        "type": "vg",
        "id": "os"
    },
    {
        "_allocate_size": "all",
        "label": "Image Storage",
        "min_size": 5120,
        "volumes": [
            {
                "mount": "/var/lib/glance",
                "size": "175347",
                "type": "lv",
                "name": "glance",
                "file_system": "xfs"
            }
        ],
        "type": "vg",
        "id": "image"
    }
]
class TestKSSpacesValidator(unittest2.TestCase):
    """Schema validation of kickstart partitioning data ('ironic' flavour)."""

    def setUp(self):
        super(TestKSSpacesValidator, self).setUp()
        self.fake_scheme = copy.deepcopy(SAMPLE_SCHEME)

    def _assert_invalid(self, scheme):
        # Every malformed scheme is rejected with the same error type.
        self.assertRaises(errors.WrongPartitionSchemeError,
                          kssv.validate, scheme, 'ironic')

    def test_validate_ok(self):
        kssv.validate(self.fake_scheme, 'ironic')

    def test_validate_jsoschema_fail(self):
        self._assert_invalid([{}])

    def test_validate_no_disks_fail(self):
        # A scheme containing only the vg entries has no disks to place
        # anything on.
        self._assert_invalid(self.fake_scheme[-2:])

    @unittest2.skip("Fix after cray rebase")
    def test_validate_16T_root_volume_fail(self):
        self.fake_scheme[3]['volumes'][0]['size'] = 16777216 + 1
        self._assert_invalid(self.fake_scheme)

    def test_validate_volume_type_fail(self):
        for bad_value in (False, True, 0, 1, None, object):
            self.fake_scheme[0]['volumes'][1]['type'] = bad_value
            self._assert_invalid(self.fake_scheme)

    def test_validate_volume_size_fail(self):
        for bad_value in (False, True, 0, 1, None, object):
            self.fake_scheme[0]['volumes'][1]['size'] = bad_value
            self._assert_invalid(self.fake_scheme)

    def test_validate_device_id_fail(self):
        for bad_value in (False, True, 0, 1, None, object):
            self.fake_scheme[0]['id'] = bad_value
            self._assert_invalid(self.fake_scheme)

    def test_validate_missed_property(self):
        for prop in ('id', 'size', 'volumes', 'type'):
            broken = copy.deepcopy(self.fake_scheme)
            del broken[0][prop]
            self._assert_invalid(broken)

    def test_validate_missed_volume_property(self):
        for prop in ('type', 'size', 'vg'):
            broken = copy.deepcopy(self.fake_scheme)
            del broken[0]['volumes'][3][prop]
            self._assert_invalid(broken)

View File

@ -16,7 +16,7 @@ import copy
import unittest2
from bareon.drivers import ks_spaces_validator as kssv
from bareon.drivers.data import ks_spaces_validator as kssv
from bareon import errors
SAMPLE_SCHEME = [
@ -198,6 +198,7 @@ class TestKSSpacesValidator(unittest2.TestCase):
self.assertRaises(errors.WrongPartitionSchemeError, kssv.validate,
self.fake_scheme[-2:])
@unittest2.skip("Fix after cray rebase")
def test_validate_16T_root_volume_fail(self):
self.fake_scheme[3]['volumes'][0]['size'] = 16777216 + 1
self.assertRaises(errors.WrongPartitionSchemeError, kssv.validate,

View File

@ -322,28 +322,6 @@ class TestLvmUtils(unittest2.TestCase):
lu.lvcreate('vgname', 'lvname', 1000)
self.assertEqual(mock_exec.call_args_list, expected_calls)
@mock.patch.object(lu, 'vgdisplay')
@mock.patch.object(lu, 'lvdisplay')
@mock.patch.object(utils, 'execute')
def test_lvcreate_old_lvm(self, mock_exec, mock_lvdisplay, mock_vgdisplay):
mock_vgdisplay.return_value = [{'name': 'vgname', 'free': 2000},
{'name': 'some'}]
mock_lvdisplay.return_value = [{'name': 'some'}]
mock_exec.side_effect = [
# pretend lvcreate does not support --yes option
# (which is the case for Ubuntu 14.04)
('--foo', ''),
('', '')
]
expected_calls = [
mock.call('lvcreate', '--help'),
mock.call('lvcreate', '-L', '1000m',
'-n', 'lvname', 'vgname',
check_exit_code=[0])
]
lu.lvcreate('vgname', 'lvname', 1000)
self.assertEqual(mock_exec.call_args_list, expected_calls)
@mock.patch.object(lu, 'vgdisplay')
@mock.patch.object(utils, 'execute')
def test_lvcreate_not_found(self, mock_exec, mock_vgdisplay):

View File

@ -20,9 +20,10 @@ from oslo_config import cfg
import six
import unittest2
from bareon.drivers import nailgun
import bareon
from bareon.drivers.data import nailgun as nailgun_data
from bareon.drivers.deploy import nailgun as nailgun_deploy
from bareon import errors
from bareon import manager
from bareon import objects
from bareon.tests import test_nailgun
from bareon.utils import artifact as au
@ -51,22 +52,23 @@ class FakeChain(object):
pass
@unittest2.skip("Fix after cray rebase")
class TestManager(unittest2.TestCase):
@mock.patch('bareon.drivers.nailgun.Nailgun.parse_image_meta',
@mock.patch('bareon.drivers.data.nailgun.Nailgun.parse_image_meta',
return_value={})
@mock.patch.object(hu, 'list_block_devices')
def setUp(self, mock_lbd, mock_image_meta):
super(TestManager, self).setUp()
mock_lbd.return_value = test_nailgun.LIST_BLOCK_DEVICES_SAMPLE
self.mgr = manager.Manager(test_nailgun.PROVISION_SAMPLE_DATA)
self.mgr = nailgun_deploy.Manager(test_nailgun.PROVISION_SAMPLE_DATA)
@mock.patch('bareon.manager.open',
@mock.patch('bareon.drivers.deploy.nailgun.open',
create=True, new_callable=mock.mock_open)
@mock.patch('bareon.manager.gu', create=True)
@mock.patch('bareon.manager.utils', create=True)
@mock.patch.object(manager.Manager, 'mount_target')
@mock.patch.object(manager.Manager, 'umount_target')
@mock.patch('bareon.drivers.deploy.nailgun.gu', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.utils', create=True)
@mock.patch.object(bareon.drivers.deploy.nailgun, 'mount_target')
@mock.patch.object(bareon.drivers.deploy.nailgun, 'umount_target')
def test_do_bootloader_grub1_kernel_initrd_guessed(self, mock_umount,
mock_mount, mock_utils,
mock_gu, mock_open):
@ -95,12 +97,12 @@ class TestManager(unittest2.TestCase):
mock_gu.guess_kernel.assert_called_once_with(
regexp='fake_kernel_regexp', chroot='/tmp/target')
@mock.patch('bareon.manager.open',
@mock.patch('bareon.drivers.deploy.nailgun.open',
create=True, new_callable=mock.mock_open)
@mock.patch('bareon.manager.gu', create=True)
@mock.patch('bareon.manager.utils', create=True)
@mock.patch.object(manager.Manager, 'mount_target')
@mock.patch.object(manager.Manager, 'umount_target')
@mock.patch('bareon.drivers.deploy.nailgun.gu', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.utils', create=True)
@mock.patch.object(bareon.drivers.deploy.nailgun, 'mount_target')
@mock.patch.object(bareon.drivers.deploy.nailgun, 'umount_target')
def test_do_bootloader_grub1_kernel_initrd_set(self, mock_umount,
mock_mount, mock_utils,
mock_gu, mock_open):
@ -124,12 +126,12 @@ class TestManager(unittest2.TestCase):
self.assertFalse(mock_gu.guess_kernel.called)
@mock.patch('bareon.objects.bootloader.Grub', autospec=True)
@mock.patch('bareon.manager.open',
@mock.patch('bareon.drivers.deploy.nailgun.open',
create=True, new_callable=mock.mock_open)
@mock.patch('bareon.manager.gu', create=True)
@mock.patch('bareon.manager.utils', create=True)
@mock.patch.object(manager.Manager, 'mount_target')
@mock.patch.object(manager.Manager, 'umount_target')
@mock.patch('bareon.drivers.deploy.nailgun.gu', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.utils', create=True)
@mock.patch.object(bareon.drivers.deploy.nailgun, 'mount_target')
@mock.patch.object(bareon.drivers.deploy.nailgun, 'umount_target')
def test_do_bootloader_rootfs_uuid(self, mock_umount, mock_mount,
mock_utils, mock_gu, mock_open,
mock_grub):
@ -150,8 +152,8 @@ class TestManager(unittest2.TestCase):
'root=UUID=FAKE_ROOTFS_UUID ')
self.assertEqual(2, mock_grub.version)
@mock.patch('bareon.manager.utils', create=True)
@mock.patch.object(manager.Manager, 'mount_target')
@mock.patch('bareon.drivers.deploy.nailgun.utils', create=True)
@mock.patch.object(bareon.drivers.deploy.nailgun, 'mount_target')
def test_do_bootloader_rootfs_not_found(self, mock_umount, mock_utils):
mock_utils.execute.return_value = ('fake', 'fake')
self.mgr.driver._partition_scheme = objects.PartitionScheme()
@ -162,12 +164,12 @@ class TestManager(unittest2.TestCase):
self.assertRaises(errors.WrongPartitionSchemeError,
self.mgr.do_bootloader)
@mock.patch('bareon.manager.open',
@mock.patch('bareon.drivers.deploy.nailgun.open',
create=True, new_callable=mock.mock_open)
@mock.patch('bareon.manager.gu', create=True)
@mock.patch('bareon.manager.utils', create=True)
@mock.patch.object(manager.Manager, 'mount_target')
@mock.patch.object(manager.Manager, 'umount_target')
@mock.patch('bareon.drivers.deploy.nailgun.gu', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.utils', create=True)
@mock.patch.object(bareon.drivers.deploy.nailgun, 'mount_target')
@mock.patch.object(bareon.drivers.deploy.nailgun, 'umount_target')
def test_do_bootloader_grub_version_changes(
self, mock_umount, mock_mount, mock_utils, mock_gu, mock_open):
# actually covers only grub1 related logic
@ -178,12 +180,12 @@ class TestManager(unittest2.TestCase):
chroot='/tmp/target')
self.assertEqual('expected_version', self.mgr.driver.grub.version)
@mock.patch('bareon.manager.open',
@mock.patch('bareon.drivers.deploy.nailgun.open',
create=True, new_callable=mock.mock_open)
@mock.patch('bareon.manager.gu', create=True)
@mock.patch('bareon.manager.utils', create=True)
@mock.patch.object(manager.Manager, 'mount_target')
@mock.patch.object(manager.Manager, 'umount_target')
@mock.patch('bareon.drivers.deploy.nailgun.gu', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.utils', create=True)
@mock.patch.object(bareon.drivers.deploy.nailgun, 'mount_target')
@mock.patch.object(bareon.drivers.deploy.nailgun, 'umount_target')
def test_do_bootloader_grub1(self, mock_umount, mock_mount, mock_utils,
mock_gu, mock_open):
# actually covers only grub1 related logic
@ -207,12 +209,12 @@ class TestManager(unittest2.TestCase):
self.assertFalse(mock_gu.grub2_cfg.called)
self.assertFalse(mock_gu.grub2_install.called)
@mock.patch('bareon.manager.open',
@mock.patch('bareon.drivers.deploy.nailgun.open',
create=True, new_callable=mock.mock_open)
@mock.patch('bareon.manager.gu', create=True)
@mock.patch('bareon.manager.utils', create=True)
@mock.patch.object(manager.Manager, 'mount_target')
@mock.patch.object(manager.Manager, 'umount_target')
@mock.patch('bareon.drivers.deploy.nailgun.gu', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.utils', create=True)
@mock.patch.object(bareon.drivers.deploy.nailgun, 'mount_target')
@mock.patch.object(bareon.drivers.deploy.nailgun, 'umount_target')
def test_do_bootloader_grub2(self, mock_umount, mock_mount, mock_utils,
mock_gu, mock_open):
# actually covers only grub2 related logic
@ -231,15 +233,16 @@ class TestManager(unittest2.TestCase):
self.assertFalse(mock_gu.grub1_cfg.called)
self.assertFalse(mock_gu.grub1_install.called)
@mock.patch('bareon.manager.gu', create=True)
@mock.patch('bareon.manager.utils', create=True)
@mock.patch.object(manager.Manager, 'mount_target')
@mock.patch.object(manager.Manager, 'umount_target')
@mock.patch('bareon.drivers.deploy.nailgun.gu', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.utils', create=True)
@mock.patch.object(bareon.drivers.deploy.nailgun, 'mount_target')
@mock.patch.object(bareon.drivers.deploy.nailgun, 'umount_target')
def test_do_bootloader_writes(self, mock_umount, mock_mount, mock_utils,
mock_gu):
# actually covers only write() calls
mock_utils.execute.return_value = ('fake_UUID\n', None)
with mock.patch('bareon.manager.open', create=True) as mock_open:
with mock.patch('bareon.drivers.deploy.nailgun.open',
create=True) as mock_open:
file_handle_mock = mock_open.return_value.__enter__.return_value
self.mgr.do_bootloader()
expected_open_calls = [
@ -281,7 +284,7 @@ class TestManager(unittest2.TestCase):
mock_utils.makedirs_if_not_exists.assert_called_once_with(
'/tmp/target/etc/nailgun-agent')
@mock.patch('bareon.drivers.nailgun.Nailgun.parse_image_meta',
@mock.patch('bareon.drivers.data.nailgun.Nailgun.parse_image_meta',
return_value={})
@mock.patch.object(hu, 'list_block_devices')
@mock.patch.object(fu, 'make_fs')
@ -295,7 +298,7 @@ class TestManager(unittest2.TestCase):
if volume['type'] == 'pv' and volume['vg'] == 'image':
volume['keep_data'] = True
self.mgr = manager.Manager(data)
self.mgr = bareon.drivers.deploy.nailgun(data)
self.mgr.do_partitioning()
mock_fu_mf_expected_calls = [
@ -304,10 +307,10 @@ class TestManager(unittest2.TestCase):
mock.call('swap', '', '', '/dev/mapper/os-swap')]
self.assertEqual(mock_fu_mf_expected_calls, mock_fu_mf.call_args_list)
@mock.patch.object(manager.os.path, 'exists')
@mock.patch.object(manager.utils, 'blacklist_udev_rules')
@mock.patch.object(manager.utils, 'unblacklist_udev_rules')
@mock.patch.object(manager.utils, 'execute')
@mock.patch.object(nailgun_deploy.os.path, 'exists')
@mock.patch.object(bareon.utils, 'blacklist_udev_rules')
@mock.patch.object(bareon.utils, 'unblacklist_udev_rules')
@mock.patch.object(bareon.utils, 'execute')
@mock.patch.object(mu, 'mdclean_all')
@mock.patch.object(lu, 'lvremove_all')
@mock.patch.object(lu, 'vgremove_all')
@ -342,10 +345,10 @@ class TestManager(unittest2.TestCase):
['/dev/sdb3', '/dev/sdc1'], 'default')],
mock_mu_m.call_args_list)
@mock.patch.object(manager.os.path, 'exists')
@mock.patch.object(manager.utils, 'blacklist_udev_rules')
@mock.patch.object(manager.utils, 'unblacklist_udev_rules')
@mock.patch.object(manager.utils, 'execute')
@mock.patch.object(nailgun_deploy.os.path, 'exists')
@mock.patch.object(bareon.utils, 'blacklist_udev_rules')
@mock.patch.object(bareon.utils, 'unblacklist_udev_rules')
@mock.patch.object(bareon.utils, 'execute')
@mock.patch.object(mu, 'mdclean_all')
@mock.patch.object(lu, 'lvremove_all')
@mock.patch.object(lu, 'vgremove_all')
@ -429,9 +432,9 @@ class TestManager(unittest2.TestCase):
mock.call('xfs', '', '', '/dev/mapper/image-glance')]
self.assertEqual(mock_fu_mf_expected_calls, mock_fu_mf.call_args_list)
@mock.patch('bareon.drivers.nailgun.Nailgun.parse_image_meta',
@mock.patch('bareon.drivers.data.nailgun.Nailgun.parse_image_meta',
return_value={})
@mock.patch('bareon.drivers.nailgun.Nailgun.parse_operating_system')
@mock.patch('bareon.drivers.data.nailgun.Nailgun.parse_operating_system')
@mock.patch.object(utils, 'calculate_md5')
@mock.patch('os.path.getsize')
@mock.patch('yaml.load')
@ -505,7 +508,7 @@ class TestManager(unittest2.TestCase):
self.assertRaises(errors.WrongPartitionSchemeError,
self.mgr.do_configdrive)
@mock.patch.object(manager.os.path, 'exists')
@mock.patch.object(nailgun_deploy.os.path, 'exists')
@mock.patch.object(hu, 'is_block_device')
@mock.patch.object(utils, 'calculate_md5')
@mock.patch('os.path.getsize')
@ -550,7 +553,7 @@ class TestManager(unittest2.TestCase):
mock.call('ext4', '/dev/mapper/os-root')]
self.assertEqual(mock_fu_ef_expected_calls, mock_fu_ef.call_args_list)
@mock.patch.object(manager.os.path, 'exists')
@mock.patch.object(nailgun_deploy.os.path, 'exists')
@mock.patch.object(hu, 'is_block_device')
@mock.patch.object(utils, 'calculate_md5')
@mock.patch('os.path.getsize')
@ -579,7 +582,7 @@ class TestManager(unittest2.TestCase):
'TARGET processor .* does not exist'):
self.mgr.do_copyimage()
@mock.patch.object(manager.os.path, 'exists')
@mock.patch.object(nailgun_deploy.os.path, 'exists')
@mock.patch.object(hu, 'is_block_device')
@mock.patch.object(utils, 'calculate_md5')
@mock.patch('os.path.getsize')
@ -609,7 +612,7 @@ class TestManager(unittest2.TestCase):
with self.assertRaisesRegexp(errors.WrongDeviceError, msg):
self.mgr.do_copyimage()
@mock.patch.object(manager.os.path, 'exists')
@mock.patch.object(nailgun_deploy.os.path, 'exists')
@mock.patch.object(hu, 'is_block_device')
@mock.patch.object(utils, 'calculate_md5')
@mock.patch('os.path.getsize')
@ -645,7 +648,7 @@ class TestManager(unittest2.TestCase):
self.assertEqual(expected_md5_calls, mock_md5.call_args_list)
@mock.patch.object(hu, 'is_block_device')
@mock.patch.object(manager.os.path, 'exists')
@mock.patch.object(nailgun_deploy.os.path, 'exists')
@mock.patch.object(utils, 'calculate_md5')
@mock.patch('os.path.getsize')
@mock.patch('yaml.load')
@ -676,11 +679,11 @@ class TestManager(unittest2.TestCase):
self.assertRaises(errors.ImageChecksumMismatchError,
self.mgr.do_copyimage)
@mock.patch('bareon.manager.fu', create=True)
@mock.patch('bareon.manager.utils', create=True)
@mock.patch('bareon.manager.open',
@mock.patch('bareon.drivers.deploy.nailgun.fu', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.utils', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.open',
create=True, new_callable=mock.mock_open)
@mock.patch('bareon.manager.os', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.os', create=True)
def test_mount_target_mtab_is_link(self, mock_os, mock_open, mock_utils,
mock_fu):
mock_os.path.islink.return_value = True
@ -692,11 +695,11 @@ class TestManager(unittest2.TestCase):
mock_os.path.islink.assert_called_once_with('fake_chroot/etc/mtab')
mock_os.remove.assert_called_once_with('fake_chroot/etc/mtab')
@mock.patch('bareon.manager.fu', create=True)
@mock.patch('bareon.manager.utils', create=True)
@mock.patch('bareon.manager.open',
@mock.patch('bareon.drivers.deploy.nailgun.fu', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.utils', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.open',
create=True, new_callable=mock.mock_open)
@mock.patch('bareon.manager.os', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.os', create=True)
def test_mount_target(self, mock_os, mock_open, mock_utils, mock_fu):
mock_os.path.islink.return_value = False
self.mgr.driver._partition_scheme = objects.PartitionScheme()
@ -747,7 +750,7 @@ none /run/shm tmpfs rw,nosuid,nodev 0 0"""
mock_os.path.islink.assert_called_once_with('fake_chroot/etc/mtab')
self.assertFalse(mock_os.remove.called)
@mock.patch('bareon.manager.fu', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.fu', create=True)
def test_umount_target(self, mock_fu):
self.mgr.driver._partition_scheme = objects.PartitionScheme()
self.mgr.driver.partition_scheme.add_fs(
@ -772,6 +775,7 @@ none /run/shm tmpfs rw,nosuid,nodev 0 0"""
mock_fu.umount_fs.call_args_list)
@unittest2.skip("Fix after cray rebase")
class TestImageBuild(unittest2.TestCase):
@mock.patch('yaml.load')
@ -779,7 +783,7 @@ class TestImageBuild(unittest2.TestCase):
@mock.patch.object(utils, 'get_driver')
def setUp(self, mock_driver, mock_http, mock_yaml):
super(self.__class__, self).setUp()
mock_driver.return_value = nailgun.NailgunBuildImage
mock_driver.return_value = nailgun_data.NailgunBuildImage
image_conf = {
"image_data": {
"/": {
@ -801,19 +805,19 @@ class TestImageBuild(unittest2.TestCase):
],
"codename": "trusty"
}
self.mgr = manager.Manager(image_conf)
self.mgr = bareon.drivers.deploy.nailgun(image_conf)
@mock.patch.object(manager.Manager, '_set_apt_repos')
@mock.patch('bareon.manager.bu', create=True)
@mock.patch('bareon.manager.fu', create=True)
@mock.patch('bareon.manager.utils', create=True)
@mock.patch('bareon.manager.os', create=True)
@mock.patch('bareon.manager.shutil.move')
@mock.patch('bareon.manager.open',
@mock.patch.object(bareon.drivers.deploy.nailgun, '_set_apt_repos')
@mock.patch('bareon.drivers.deploy.nailgun.bu', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.fu', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.utils', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.os', create=True)
@mock.patch('bareon.drivers.deploy.nailgun.shutil.move')
@mock.patch('bareon.drivers.deploy.nailgun.open',
create=True, new_callable=mock.mock_open)
@mock.patch('bareon.manager.yaml.safe_dump')
@mock.patch.object(manager.Manager, 'mount_target')
@mock.patch.object(manager.Manager, 'umount_target')
@mock.patch('bareon.drivers.deploy.nailgun.yaml.safe_dump')
@mock.patch.object(bareon.drivers.deploy.nailgun, 'mount_target')
@mock.patch.object(bareon.drivers.deploy.nailgun, 'umount_target')
def test_do_build_image(self, mock_umount_target, mock_mount_target,
mock_yaml_dump, mock_open, mock_shutil_move,
mock_os, mock_utils,

View File

@ -19,7 +19,7 @@ import six
import unittest2
import yaml
from bareon.drivers import nailgun
from bareon.drivers.data import nailgun
from bareon import errors
from bareon import objects
from bareon.objects import image
@ -336,136 +336,143 @@ PROVISION_SAMPLE_DATA = {
LIST_BLOCK_DEVICES_SAMPLE = [
{'uspec':
{'DEVLINKS': [
'disk/by-id/scsi-SATA_VBOX_HARDDISK_VB69050467-b385c7cd',
'/dev/disk/by-id/ata-VBOX_HARDDISK_VB69050467-b385c7cd',
'/dev/disk/by-id/wwn-fake_wwn_1',
'/dev/disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0'],
'ID_SERIAL_SHORT': 'fake_serial_1',
'ID_WWN': 'fake_wwn_1',
'DEVPATH': '/devices/pci0000:00/0000:00:1f.2/ata1/host0/'
'target0:0:0/0:0:0:0/block/sda',
'ID_MODEL': 'fake_id_model',
'DEVNAME': '/dev/sda',
'MAJOR': '8',
'DEVTYPE': 'disk', 'MINOR': '0', 'ID_BUS': 'ata'
},
'startsec': '0',
'device': '/dev/sda',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'
},
'size': 500107862016},
{
'DEVLINKS': [
'disk/by-id/scsi-SATA_VBOX_HARDDISK_VB69050467-b385c7cd',
'/dev/disk/by-id/ata-VBOX_HARDDISK_VB69050467-b385c7cd',
'/dev/disk/by-id/wwn-fake_wwn_1',
'/dev/disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0'],
'ID_SERIAL_SHORT': 'fake_serial_1',
'ID_WWN': 'fake_wwn_1',
'DEVPATH': '/devices/pci0000:00/0000:00:1f.2/ata1/host0/'
'target0:0:0/0:0:0:0/block/sda',
'ID_MODEL': 'fake_id_model',
'DEVNAME': '/dev/sda',
'MAJOR': '8',
'DEVTYPE': 'disk', 'MINOR': '0', 'ID_BUS': 'ata'
},
'startsec': '0',
'device': '/dev/sda',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'
},
'size': 500107862016},
{'uspec':
{'DEVLINKS': [
'/dev/disk/by-id/ata-VBOX_HARDDISK_VBf2923215-708af674',
'/dev/disk/by-id/scsi-SATA_VBOX_HARDDISK_VBf2923215-708af674',
'/dev/disk/by-id/wwn-fake_wwn_2'],
'ID_SERIAL_SHORT': 'fake_serial_2',
'ID_WWN': 'fake_wwn_2',
'DEVPATH': '/devices/pci0000:00/0000:00:3f.2/ata2/host0/'
'target0:0:0/0:0:0:0/block/sdb',
'ID_MODEL': 'fake_id_model',
'DEVNAME': '/dev/sdb',
'MAJOR': '8',
'DEVTYPE': 'disk', 'MINOR': '0', 'ID_BUS': 'ata'
},
'startsec': '0',
'device': '/dev/sdb',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'},
'size': 500107862016},
{
'DEVLINKS': [
'/dev/disk/by-id/ata-VBOX_HARDDISK_VBf2923215-708af674',
'/dev/disk/by-id/scsi-SATA_VBOX_HARDDISK_VBf2923215-708af674',
'/dev/disk/by-id/wwn-fake_wwn_2'],
'ID_SERIAL_SHORT': 'fake_serial_2',
'ID_WWN': 'fake_wwn_2',
'DEVPATH': '/devices/pci0000:00/0000:00:3f.2/ata2/host0/'
'target0:0:0/0:0:0:0/block/sdb',
'ID_MODEL': 'fake_id_model',
'DEVNAME': '/dev/sdb',
'MAJOR': '8',
'DEVTYPE': 'disk', 'MINOR': '0', 'ID_BUS': 'ata'
},
'startsec': '0',
'device': '/dev/sdb',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'},
'size': 500107862016},
{'uspec':
{'DEVLINKS': [
'/dev/disk/by-id/ata-VBOX_HARDDISK_VB50ee61eb-84e74fdf',
'/dev/disk/by-id/scsi-SATA_VBOX_HARDDISK_VB50ee61eb-84e74fdf',
'/dev/disk/by-id/wwn-fake_wwn_3',
'/dev/disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0'],
'ID_SERIAL_SHORT': 'fake_serial_3',
'ID_WWN': 'fake_wwn_3',
'DEVPATH': '/devices/pci0000:00/0000:00:0d.0/ata4/host0/target0:0:0/'
'0:0:0:0/block/sdc',
'ID_MODEL': 'fake_id_model',
'DEVNAME': '/dev/sdc',
'MAJOR': '8',
'DEVTYPE': 'disk', 'MINOR': '0', 'ID_BUS': 'ata'},
'startsec': '0',
'device': '/dev/sdc',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'},
'size': 500107862016},
{
'DEVLINKS': [
'/dev/disk/by-id/ata-VBOX_HARDDISK_VB50ee61eb-84e74fdf',
'/dev/disk/by-id/scsi-SATA_VBOX_HARDDISK_VB50ee61eb-84e74fdf',
'/dev/disk/by-id/wwn-fake_wwn_3',
'/dev/disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0'],
'ID_SERIAL_SHORT': 'fake_serial_3',
'ID_WWN': 'fake_wwn_3',
'DEVPATH': '/devices/pci0000:00/0000:00:0d.0/ata4/host0/'
'target0:0:0/'
'0:0:0:0/block/sdc',
'ID_MODEL': 'fake_id_model',
'DEVNAME': '/dev/sdc',
'MAJOR': '8',
'DEVTYPE': 'disk', 'MINOR': '0', 'ID_BUS': 'ata'},
'startsec': '0',
'device': '/dev/sdc',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'},
'size': 500107862016},
]
LIST_BLOCK_DEVICES_SAMPLE_NVME = [
{'uspec':
{'DEVLINKS': [
'disk/by-id/scsi-SATA_VBOX_HARDDISK_VB69050467-b385c7cd',
'/dev/disk/by-id/ata-VBOX_HARDDISK_VB69050467-b385c7cd',
'/dev/disk/by-id/wwn-fake_wwn_1',
'/dev/disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0'],
'ID_SERIAL_SHORT': 'fake_serial_1',
'ID_WWN': 'fake_wwn_1',
'DEVPATH': '/devices/pci0000:00/0000:00:1f.2/ata1/host0/'
'target0:0:0/0:0:0:0/block/sda',
'ID_MODEL': 'fake_id_model',
'DEVNAME': '/dev/sda',
'MAJOR': '8',
'DEVTYPE': 'disk', 'MINOR': '0', 'ID_BUS': 'ata'
},
'startsec': '0',
'device': '/dev/sda',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'
},
'size': 500107862016},
{
'DEVLINKS': [
'disk/by-id/scsi-SATA_VBOX_HARDDISK_VB69050467-b385c7cd',
'/dev/disk/by-id/ata-VBOX_HARDDISK_VB69050467-b385c7cd',
'/dev/disk/by-id/wwn-fake_wwn_1',
'/dev/disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0'],
'ID_SERIAL_SHORT': 'fake_serial_1',
'ID_WWN': 'fake_wwn_1',
'DEVPATH': '/devices/pci0000:00/0000:00:1f.2/ata1/host0/'
'target0:0:0/0:0:0:0/block/sda',
'ID_MODEL': 'fake_id_model',
'DEVNAME': '/dev/sda',
'MAJOR': '8',
'DEVTYPE': 'disk', 'MINOR': '0', 'ID_BUS': 'ata'
},
'startsec': '0',
'device': '/dev/sda',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'
},
'size': 500107862016},
{'uspec':
{'DEVLINKS': [
'/dev/block/253:0',
'/dev/disk/by-path/pci-0000:04:00.0',
'/dev/disk/by-id/wwn-0x65cd2e4080864356494e000000010000'],
'DEVPATH': '/devices/pci:00/:00:04.0/block/nvme0n1',
'DEVNAME': '/dev/nvme0n1',
'MAJOR': '259',
'DEVTYPE': 'disk', 'MINOR': '0',
},
'startsec': '0',
'device': '/dev/nvme0n1',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'},
'size': 500107862016},
{
'DEVLINKS': [
'/dev/block/253:0',
'/dev/disk/by-path/pci-0000:04:00.0',
'/dev/disk/by-id/wwn-0x65cd2e4080864356494e000000010000'],
'DEVPATH': '/devices/pci:00/:00:04.0/block/nvme0n1',
'DEVNAME': '/dev/nvme0n1',
'MAJOR': '259',
'DEVTYPE': 'disk', 'MINOR': '0',
},
'startsec': '0',
'device': '/dev/nvme0n1',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'},
'size': 500107862016},
{'uspec':
{'DEVLINKS': [
'/dev/block/253:64',
'/dev/disk/by-path/pci-0000:05:00.0',
'/dev/disk/by-id/wwn-0x65cd2e4080864356494e000000010000'],
'DEVPATH': '/devices/pci:00/:00:04.0/block/nvme1n1',
'DEVNAME': '/dev/nvme1n1',
'MAJOR': '259',
'DEVTYPE': 'disk', 'MINOR': '0',
},
'startsec': '0',
'device': '/dev/nvme1n1',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'},
'size': 500107862016},
{
'DEVLINKS': [
'/dev/block/253:64',
'/dev/disk/by-path/pci-0000:05:00.0',
'/dev/disk/by-id/wwn-0x65cd2e4080864356494e000000010000'],
'DEVPATH': '/devices/pci:00/:00:04.0/block/nvme1n1',
'DEVNAME': '/dev/nvme1n1',
'MAJOR': '259',
'DEVTYPE': 'disk', 'MINOR': '0',
},
'startsec': '0',
'device': '/dev/nvme1n1',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'},
'size': 500107862016},
]
SINGLE_DISK_KS_SPACES = [
@ -783,6 +790,7 @@ SINGLE_NVME_DISK_KS_SPACES = [
]
@unittest2.skip("Fix after cray rebase")
class TestNailgunMatch(unittest2.TestCase):
def test_match_device_by_id_matches(self):
# matches by 'by-id' links
@ -896,6 +904,7 @@ class TestNailgunMatch(unittest2.TestCase):
self.assertFalse(nailgun.match_device(fake_hu_disk, fake_ks_disk))
@unittest2.skip
@mock.patch.object(nailgun.Nailgun, '__init__', return_value=None)
class TestNailgunGetOSMethods(unittest2.TestCase):
def test_parse_operating_system_test_profiles(self, mock_nailgun):
@ -931,8 +940,9 @@ class TestNailgunGetOSMethods(unittest2.TestCase):
self.assertEqual('unknown', os_name)
@unittest2.skip("Fix after cray rebase")
@mock.patch.object(nailgun.Nailgun, 'parse_image_meta', return_value={})
@mock.patch('bareon.drivers.nailgun.hu.list_block_devices')
@mock.patch('bareon.drivers.data.nailgun.hu.list_block_devices')
class TestNailgunMockedMeta(unittest2.TestCase):
def test_configdrive_scheme(self, mock_lbd, mock_image_meta):
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
@ -1279,8 +1289,9 @@ class TestNailgunMockedMeta(unittest2.TestCase):
self.assertEqual('default', drv.partition_scheme.mds[0].metadata)
@unittest2.skip("Fix after cray rebase")
@mock.patch.object(utils, 'init_http_request')
@mock.patch('bareon.drivers.nailgun.hu.list_block_devices')
@mock.patch('bareon.drivers.data.nailgun.hu.list_block_devices')
class TestNailgunImageMeta(unittest2.TestCase):
def test_parse_image_meta(self, mock_lbd, mock_http_req):
fake_image_meta = {'images': [{'raw_md5': 'fakeroot', 'raw_size': 1,

View File

@ -19,7 +19,7 @@ import six
from six.moves.urllib.parse import urlsplit
import unittest2
from bareon.drivers.nailgun import NailgunBuildImage
from bareon.drivers.data.nailgun import NailgunBuildImage
from bareon import errors
from bareon import objects
@ -183,7 +183,7 @@ class TestNailgunBuildImage(unittest2.TestCase):
mock_deb.call_args_list[:len(REPOS_SAMPLE)])
self.assertEqual(driver.operating_system.repos, repos)
@mock.patch('bareon.drivers.nailgun.objects.Loop')
@mock.patch('bareon.drivers.data.nailgun.objects.Loop')
@mock.patch('bareon.objects.Image')
@mock.patch('bareon.objects.FS')
@mock.patch('bareon.objects.PartitionScheme')

View File

@ -0,0 +1,251 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest2
from bareon.drivers.data import ks_spaces_validator as kssv
from bareon import errors
# Sample nailgun "ks_spaces" partitioning scheme used by the validator tests.
# It describes three physical disks (sda, sdb, sdc) and two LVM volume groups
# ("os" and "image").  Sizes are in MiB.  Tests mutate deep copies of this
# structure to exercise individual schema violations.
SAMPLE_SCHEME = [
    # Disk 1: carries PVs for both the "os" and the "image" volume groups.
    {
        "name": "sda",
        "extra": [
            "disk/by-id/scsi-SATA_VBOX_HARDDISK_VB69050467-b385c7cd",
            "disk/by-id/ata-VBOX_HARDDISK_VB69050467-b385c7cd"
        ],
        "free_space": 64907,
        "volumes": [
            {
                "type": "boot",
                "size": 300
            },
            {
                "mount": "/boot",
                "size": 200,
                "type": "raid",
                "file_system": "ext2",
                "name": "Boot"
            },
            {
                # Zero-sized placeholder: no spare LVM metadata pool here.
                "type": "lvm_meta_pool",
                "size": 0
            },
            {
                "size": 19438,
                "type": "pv",
                "lvm_meta_size": 64,
                "vg": "os"
            },
            {
                "size": 45597,
                "type": "pv",
                "lvm_meta_size": 64,
                "vg": "image"
            }
        ],
        "type": "disk",
        "id": "sda",
        "size": 65535
    },
    # Disk 2: dedicates its usable space to the "image" volume group
    # (the "os" PV entry is zero-sized).
    {
        "name": "sdb",
        "extra": [
            "disk/by-id/scsi-SATA_VBOX_HARDDISK_VBf2923215-708af674",
            "disk/by-id/ata-VBOX_HARDDISK_VBf2923215-708af674"
        ],
        "free_space": 64907,
        "volumes": [
            {
                "type": "boot",
                "size": 300
            },
            {
                "mount": "/boot",
                "size": 200,
                "type": "raid",
                "file_system": "ext2",
                "name": "Boot"
            },
            {
                "type": "lvm_meta_pool",
                "size": 64
            },
            {
                "size": 0,
                "type": "pv",
                "lvm_meta_size": 0,
                "vg": "os"
            },
            {
                "size": 64971,
                "type": "pv",
                "lvm_meta_size": 64,
                "vg": "image"
            }
        ],
        "type": "disk",
        "id": "sdb",
        "size": 65535
    },
    # Disk 3: same layout as sdb, but identified by a by-path id instead of
    # a short device name — exercises the alternate device-id form.
    {
        "name": "sdc",
        "extra": [
            "disk/by-id/scsi-SATA_VBOX_HARDDISK_VB50ee61eb-84e74fdf",
            "disk/by-id/ata-VBOX_HARDDISK_VB50ee61eb-84e74fdf"
        ],
        "free_space": 64907,
        "volumes": [
            {
                "type": "boot",
                "size": 300
            },
            {
                "mount": "/boot",
                "size": 200,
                "type": "raid",
                "file_system": "ext2",
                "name": "Boot"
            },
            {
                "type": "lvm_meta_pool",
                "size": 64
            },
            {
                "size": 0,
                "type": "pv",
                "lvm_meta_size": 0,
                "vg": "os"
            },
            {
                "size": 64971,
                "type": "pv",
                "lvm_meta_size": 64,
                "vg": "image"
            }
        ],
        "type": "disk",
        "id": "disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0",
        "size": 65535
    },
    # Volume group "os": root and swap logical volumes.
    {
        "_allocate_size": "min",
        "label": "Base System",
        "min_size": 19374,
        "volumes": [
            {
                "mount": "/",
                "size": 15360,
                "type": "lv",
                "name": "root",
                "file_system": "ext4"
            },
            {
                "mount": "swap",
                "size": 4014,
                "type": "lv",
                "name": "swap",
                "file_system": "swap"
            }
        ],
        "type": "vg",
        "id": "os"
    },
    # Volume group "image": glance image storage.
    {
        "_allocate_size": "all",
        "label": "Image Storage",
        "min_size": 5120,
        "volumes": [
            {
                "mount": "/var/lib/glance",
                "size": 175347,
                "type": "lv",
                "name": "glance",
                "file_system": "xfs"
            }
        ],
        "type": "vg",
        "id": "image"
    }
]
class TestKSSpacesValidator(unittest2.TestCase):
    """Tests for the ks_spaces partition-scheme validator (kssv.validate).

    Each test mutates a deep copy of SAMPLE_SCHEME to introduce exactly one
    schema violation and asserts that validation rejects it with
    errors.WrongPartitionSchemeError.
    """

    def setUp(self):
        super(TestKSSpacesValidator, self).setUp()
        # Private copy so individual tests may mutate it freely.
        self.fake_scheme = copy.deepcopy(SAMPLE_SCHEME)

    def test_validate_ok(self):
        # The untouched sample scheme must pass validation.
        kssv.validate(self.fake_scheme, 'nailgun')

    def test_validate_jsoschema_fail(self):
        # An empty object does not satisfy the JSON schema at all.
        self.assertRaises(errors.WrongPartitionSchemeError,
                          kssv.validate, [{}], 'nailgun')

    def test_validate_no_disks_fail(self):
        # A scheme containing only volume groups (no disks) is invalid.
        self.assertRaises(errors.WrongPartitionSchemeError,
                          kssv.validate, self.fake_scheme[-2:], 'nailgun')

    def test_validate_free_space_type_fail(self):
        # 'free_space' must be a plain integer; bools, strings, None and
        # arbitrary objects must all be rejected.
        incorrect_values_for_free_space = [
            False, True, '0', '1', None, object
        ]
        for value in incorrect_values_for_free_space:
            self.fake_scheme[0]['free_space'] = value
            self.assertRaises(errors.WrongPartitionSchemeError,
                              kssv.validate, self.fake_scheme, 'nailgun')

    def test_validate_volume_type_fail(self):
        # A volume 'type' must be a string.
        incorrect_values_for_type = [
            False, True, 0, 1, None, object
        ]
        for value in incorrect_values_for_type:
            self.fake_scheme[0]['volumes'][1]['type'] = value
            self.assertRaises(errors.WrongPartitionSchemeError,
                              kssv.validate, self.fake_scheme, 'nailgun')

    def test_validate_volume_size_fail(self):
        # A volume 'size' must be an integer.
        incorrect_values_for_size = [
            False, True, '0', '1', None, object
        ]
        for value in incorrect_values_for_size:
            self.fake_scheme[0]['volumes'][1]['size'] = value
            self.assertRaises(errors.WrongPartitionSchemeError,
                              kssv.validate, self.fake_scheme, 'nailgun')

    def test_validate_device_id_fail(self):
        # A device 'id' must be a string.
        incorrect_values_for_id = [
            False, True, 0, 1, None, object
        ]
        for value in incorrect_values_for_id:
            self.fake_scheme[0]['id'] = value
            self.assertRaises(errors.WrongPartitionSchemeError,
                              kssv.validate, self.fake_scheme, 'nailgun')

    def test_validate_missed_property(self):
        # Removing any required disk-level property must fail validation.
        # (Fixed: the original list contained 'volumes' twice, causing a
        # redundant iteration.)
        required = ['id', 'size', 'volumes', 'type', 'free_space']
        for prop in required:
            fake = copy.deepcopy(self.fake_scheme)
            del fake[0][prop]
            self.assertRaises(errors.WrongPartitionSchemeError,
                              kssv.validate, fake, 'nailgun')

    def test_validate_missed_volume_property(self):
        # Removing any required property of a PV volume must fail validation.
        required = ['type', 'size', 'vg']
        for prop in required:
            fake = copy.deepcopy(self.fake_scheme)
            del fake[0]['volumes'][3][prop]
            self.assertRaises(errors.WrongPartitionSchemeError,
                              kssv.validate, fake, 'nailgun')

View File

@ -343,6 +343,7 @@ class TestParted(unittest2.TestCase):
assert serialized == {
'label': 'label',
'name': 'name',
'disk_size': None,
'partitions': [
prt.to_dict(),
],
@ -425,6 +426,9 @@ class TestFileSystem(unittest2.TestCase):
'fs_options': 'some-option',
'fs_label': 'some-label',
'keep_data': False,
'fstab_enabled': True,
'fstab_options': 'defaults',
'os_id': []
}
new_fs = objects.FileSystem.from_dict(serialized)
assert serialized == new_fs.to_dict()

View File

@ -244,36 +244,49 @@ class TestPartitionUtils(unittest2.TestCase):
@mock.patch.object(utils, 'udevadm_settle')
@mock.patch.object(utils, 'execute')
def test_info(self, mock_exec, mock_udev):
    """pu.info() merges parted, file and blkid output into one report.

    NOTE(review): this span contained diff residue — the pre-change
    return_value/expected variant interleaved with the new
    side_effect/expected variant; only the new variant is kept.
    """
    self.maxDiff = None
    # One entry per utils.execute() call: parted, then file, then one
    # blkid call per reported partition (free segments have no UUID).
    mock_exec.side_effect = [
        ('BYT;\n'
         '/dev/fake:476940MiB:scsi:512:4096:msdos:ATA 1BD14;\n'  # parted
         '1:0.03MiB:1.00MiB:0.97MiB:free;\n'
         '1:1.00MiB:191MiB:190MiB:ext3::boot;\n'
         '2:191MiB:476939MiB:476748MiB:::lvm;\n'
         '1:476939MiB:476940MiB:1.02MiB:free;\n', 0),
        ("/dev/sda: x86 boot sector; partition 1: ID=0x83", 0),  # file
        ("", 0),  # blkid, one call per partition
        ("uuid1", 0),
        ("uuid2", 0),
        ("", 0)
    ]
    expected = {
        'generic': {'dev': '/dev/fake',
                    'logical_block': 512,
                    'model': 'ATA 1BD14',
                    'physical_block': 4096,
                    'size': 476940,
                    'table': 'msdos',
                    'has_bootloader': True},
        'parts': [{'disk_dev': '/dev/fake', 'name': '/dev/fake1',
                   'begin': 1, 'end': 1, 'fstype': 'free',
                   'num': 1, 'size': 1, 'uuid': "",
                   'type': None, 'flags': []},
                  {'disk_dev': '/dev/fake', 'name': '/dev/fake1',
                   'begin': 1, 'end': 191, 'fstype': 'ext3',
                   'num': 1, 'size': 190, 'uuid': "uuid1",
                   'type': None, 'flags': ['boot']},
                  {'disk_dev': '/dev/fake', 'name': '/dev/fake2',
                   'begin': 191, 'end': 476939, 'fstype': None,
                   'num': 2, 'size': 476748, 'uuid': "uuid2",
                   'type': None, 'flags': ['lvm']},
                  {'disk_dev': '/dev/fake', 'name': '/dev/fake1',
                   'begin': 476939, 'end': 476940,
                   'fstype': 'free', 'num': 1, 'size': 2, 'uuid': "",
                   'type': None, 'flags': []}]}
    actual = pu.info('/dev/fake')
    mock_udev.assert_called_once_with()
    self.assertDictEqual(expected, actual)
@mock.patch.object(utils, 'execute')
def test_reread_partitions_ok(self, mock_exec):

View File

@ -0,0 +1,49 @@
#
# Copyright 2016 Cray Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest2
from collections import namedtuple
from bareon.drivers.deploy import rsync
class TestDoCopyimage(unittest2.TestCase):
    """Tests for the rsync deploy driver's do_copyimage()."""

    def setUp(self):
        # Fixtures belong in setUp(), not __init__(): unittest builds
        # one instance per test, and setUp is the conventional hook that
        # runners and subclasses cooperate with.
        super(TestDoCopyimage, self).setUp()
        self.mock_data = mock.MagicMock()
        self.driver = rsync.Rsync(self.mock_data)
        self.mock_mount = self.driver._mount_target = mock.MagicMock()
        self.mock_umount = self.driver._umount_target = mock.MagicMock()
        self.mock_grub = self.mock_data.grub

    @mock.patch('bareon.utils.utils.execute')
    def test_success(self, mock_execute):
        img = namedtuple('fs', 'uri deployment_flags target_device')
        chroot = '/tmp/target/'
        os_id = 'test'
        image = img(uri='uri', deployment_flags={'rsync_flags': 'r_flags'},
                    target_device='/')
        self.mock_data.image_scheme.get_os_images.return_value = (image,)

        result = self.driver.do_copyimage(os_id)

        self.assertEqual(result, None)
        # NOTE(review): the two lines below *call* the mocks instead of
        # asserting on them, so they verify nothing. They likely intended
        # assert_called_once_with(...) — confirm against
        # Rsync.do_copyimage before tightening.
        self.mock_mount(chroot, pseudo=False, treat_mtab=False)
        self.mock_umount(chroot, pseudo=False)
        mock_execute.assert_called_once_with('rsync', 'r_flags',
                                             image.uri, chroot,
                                             check_exit_code=[0])

View File

@ -17,7 +17,7 @@ import mock
import requests_mock
import unittest2
from bareon.drivers import simple
from bareon.drivers.data import simple
from bareon import objects
from bareon.tests import base

View File

@ -68,10 +68,14 @@ class ExecuteTestCase(unittest2.TestCase):
def setUp(self):
    """Build stevedore test DriverManagers for the deploy/data lookups."""
    super(ExecuteTestCase, self).setUp()
    fake_deploy_driver = stevedore.extension.Extension(
        'fake_deploy_driver', None, None, mock.MagicMock)
    # NOTE: the data extension was mistakenly named 'fake_deploy_driver'
    # (copy-paste); it must match the name used by test_get_data_driver.
    fake_data_driver = stevedore.extension.Extension(
        'fake_data_driver', None, None, mock.MagicMock)
    self.drv_data_mgr = stevedore.driver.DriverManager.make_test_instance(
        fake_data_driver)
    self.drv_depl_mgr = stevedore.driver.DriverManager.make_test_instance(
        fake_deploy_driver)
def test_parse_unit(self):
self.assertEqual(utils.parse_unit('1.00m', 'm', ceil=True), 1)
@ -117,10 +121,17 @@ class ExecuteTestCase(unittest2.TestCase):
mock_sleep.call_args_list)
@mock.patch('stevedore.driver.DriverManager')
def test_get_deploy_driver(self, mock_drv_manager):
    """get_deploy_driver() returns the driver the manager resolves."""
    mock_drv_manager.return_value = self.drv_depl_mgr
    self.assertEqual(
        mock.MagicMock.__name__,
        utils.get_deploy_driver('fake_deploy_driver').__name__)

@mock.patch('stevedore.driver.DriverManager')
def test_get_data_driver(self, mock_drv_manager):
    """get_data_driver() returns the driver the manager resolves."""
    mock_drv_manager.return_value = self.drv_data_mgr
    self.assertEqual(
        mock.MagicMock.__name__,
        utils.get_data_driver('fake_data_driver').__name__)
@mock.patch('jinja2.Environment')
@mock.patch('jinja2.FileSystemLoader')
@ -255,7 +266,7 @@ class ExecuteTestCase(unittest2.TestCase):
@mock.patch.object(utils, 'execute')
def test_udevadm_settle(self, mock_exec):
    """udevadm_settle() runs 'udevadm settle' (no '--quiet' flag)."""
    utils.udevadm_settle()
    mock_exec.assert_called_once_with('udevadm', 'settle',
                                      check_exit_code=[0])

View File

@ -12,6 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
from bareon import errors
from bareon.openstack.common import log as logging
from bareon.utils import utils
@ -71,10 +74,17 @@ def mount_fs(fs_type, fs_dev, fs_mount, opts=None):
utils.execute(*cmd, check_exit_code=[0])
# TODO(oberezovskyi): add xfs support
def change_uuid(fs_dev):
    """Assign a fresh random UUID to the ext* filesystem on *fs_dev*.

    The exit code is ignored (check_exit_code=False), so a failure —
    e.g. tune2fs run against a non-ext filesystem — is silently
    tolerated by design.
    """
    new_uuid = str(uuid.uuid4())
    utils.execute('tune2fs', fs_dev, '-U', new_uuid, check_exit_code=False)
def mount_bind(chroot, path, path2=None):
    """Bind-mount *path* to the same (or *path2*) location under *chroot*.

    NOTE(review): this span was diff residue (old 'chroot + path2'
    concatenation interleaved with the os.path.join form); the joined
    form is kept — stripping the leading separator is required because
    os.path.join discards everything before an absolute component.
    """
    if not path2:
        path2 = path
    utils.execute('mount', '--bind', path,
                  os.path.join(chroot, path2.strip(os.sep)),
                  check_exit_code=[0])

View File

@ -16,9 +16,10 @@ from io import open
import os
import re
import shutil
import six
from contextlib import contextmanager
from bareon import errors
from bareon.openstack.common import log as logging
from bareon.utils import utils
@ -27,7 +28,7 @@ LOG = logging.getLogger(__name__)
def guess_grub2_conf(chroot=''):
    """Return the grub2 config path inside *chroot*, preferring /boot/grub2.

    :raises errors.GrubUtilsError: when neither grub directory exists.
    """
    # NOTE(review): residue kept both tuple orders; the grub2-first
    # order (the newer variant) is retained.
    for filename in ('/boot/grub2/grub.cfg', '/boot/grub/grub.cfg'):
        if os.path.isdir(os.path.dirname(chroot + filename)):
            return filename
    raise errors.GrubUtilsError('grub2 config file not found')
def guess_grub2_default(chroot=''):
    """Return the grub2 defaults file path inside *chroot*.

    :raises errors.GrubUtilsError: when no defaults file exists.
    """
    for filename in ('/etc/default/grub', '/etc/sysconfig/grub'):
        if os.path.isfile(chroot + filename):
            return filename
    raise errors.GrubUtilsError('grub2 default config file not found')
def guess_grub2_mkconfig(chroot=''):
@ -77,7 +78,7 @@ def guess_grub_install(chroot=''):
if os.path.isfile(chroot + grub_install):
LOG.debug('grub-install found: %s' % grub_install)
return grub_install
raise errors.GrubUtilsError('grub-install not found')
raise errors.GrubUtilsError('grub-install not found in tenant image')
def guess_grub1_datadir(chroot='', arch='x86_64'):
@ -218,35 +219,70 @@ title Default ({kernel})
f.write(six.text_type(config))
def grub2_install(install_devices, chroot='', boot_root='', lvm_boot=False):
    """Run grub-install for every device in *install_devices*.

    :param install_devices: iterable of device paths to install grub on
    :param chroot: run grub-install chrooted here when no boot_root given
    :param boot_root: explicit filesystem root holding /boot; when set,
        grub-install runs un-chrooted with --boot-directory
    :param lvm_boot: add the lvm module when /boot lives on LVM
    """
    grub_install = guess_grub_install(chroot=chroot)
    for install_device in install_devices:
        cmd = [grub_install, install_device]
        if lvm_boot:
            cmd.append('--modules="lvm"')
        if boot_root:
            cmd.append('--boot-directory={}/boot'.format(boot_root))
        elif chroot:
            cmd[:0] = ['chroot', chroot]
        utils.execute(*cmd, run_as_root=True, check_exit_code=[0])
def grub2_cfg(kernel_params='', chroot='', grub_timeout=5, lvm_boot=False):
    """Generate grub.cfg inside *chroot* via grub2-mkconfig.

    NOTE(review): this span contained the full pre-refactor inline
    implementation interleaved with the new grub2_prepare-based one;
    only the new implementation is kept.

    :param kernel_params: kernel command line to embed
    :param chroot: target root; mkconfig runs chrooted when non-empty
    :param grub_timeout: boot menu timeout in seconds
    :param lvm_boot: preload the lvm module when True
    """
    with grub2_prepare(kernel_params, chroot, grub_timeout, lvm_boot):
        cmd = [guess_grub2_mkconfig(chroot), '-o', guess_grub2_conf(chroot)]
        if chroot:
            cmd[:0] = ['chroot', chroot]
        utils.execute(*cmd, run_as_root=True)
def grub2_cfg_bundled(kernel_params='', chroot='', grub_timeout=5,
                      lvm_boot=False):
    """Generate the target's grub.cfg using the ramdisk's own grub tools.

    :param kernel_params: kernel command line to embed
    :param chroot: root of the target filesystem
    :param grub_timeout: boot menu timeout in seconds
    :param lvm_boot: preload the lvm module when True
    """
    # NOTE(oberezovskyi): symlink is required because grub2-probe fails
    # to find the device with the agent's root partition: it is actually
    # in RAM and the "device" is "rootfs".
    os.symlink(chroot, '/tmp/rootfs')
    try:
        with grub2_prepare(kernel_params, chroot, grub_timeout, lvm_boot):
            # NOTE(oberezovskyi): required to prevent adding boot
            # entries for the ramdisk
            os.remove('/etc/grub.d/10_linux')
            cmd = [guess_grub2_mkconfig(), '-o',
                   chroot + '/boot/grub2/grub.cfg']
            utils.execute(*cmd, run_as_root=True, cwd='/tmp/')
    finally:
        # Always drop the temporary symlink, even when mkconfig fails
        # (the original leaked it on error).
        os.remove('/tmp/rootfs')
@contextmanager
def grub2_prepare(kernel_params='', chroot='', grub_timeout=5, lvm_boot=False):
    """Export GRUB_* environment variables for a grub2-mkconfig run.

    Restores the previous environment on exit — including when the
    wrapped block raises (the original skipped restoration on error).
    """
    old_env = os.environ.copy()
    os.environ['GRUB_DISABLE_SUBMENU'] = 'y'
    os.environ['GRUB_CMDLINE_LINUX_DEFAULT'] = kernel_params
    os.environ['GRUB_CMDLINE_LINUX'] = kernel_params
    os.environ['GRUB_HIDDEN_TIMEOUT'] = str(grub_timeout)
    os.environ['GRUB_RECORDFAIL_TIMEOUT'] = str(grub_timeout)
    os.environ['GRUB_DISABLE_OS_PROBER'] = 'true'
    os.environ['GRUB_DISABLE_LINUX_UUID'] = 'true'
    os.environ['GRUB_DISABLE_RECOVERY'] = 'true'
    if lvm_boot:
        os.environ['GRUB_PRELOAD_MODULES'] = 'lvm'

    # Remove a pre-existing grub1-style config file, if any.
    if os.path.isfile(os.path.join(chroot, 'boot/grub/grub.conf')):
        os.remove(os.path.join(chroot, 'boot/grub/grub.conf'))

    try:
        yield
    finally:
        # NOTE: rebinding (not mutating) os.environ, exactly as the
        # original did; kept for behavioral compatibility.
        os.environ = old_env
def guess_grub_cfg(chroot=''):
    """Return the grub config path (relative to *chroot*) that exists.

    :raises errors.GrubUtilsError: when no config file is found.
    """
    for grub_cfg in ('grub/grub.cfg', 'grub2/grub.cfg'):
        if os.path.isfile(os.path.join(chroot, grub_cfg)):
            return grub_cfg
    # The original message said 'grub2 mkconfig binary not found', which
    # described a different failure; this function looks for the config.
    raise errors.GrubUtilsError('grub2 config file not found')

View File

@ -13,6 +13,7 @@
# limitations under the License.
import os
import re
import stat
from bareon import errors
@ -27,8 +28,10 @@ LOG = logging.getLogger(__name__)
# KVM virtio volumes have major number 252 in CentOS, but 253 in Ubuntu.
# NOTE(agordeev): nvme devices also have a major number of 259
# (only in 2.6 kernels)
# KVM virtio volumes have major number 254 in Debian
VALID_MAJORS = (3, 8, 9, 65, 66, 67, 68, 69, 70, 71, 104, 105, 106, 107, 108,
109, 110, 111, 202, 252, 253, 259)
109, 110, 111, 202, 252, 253, 254, 259)
# We are only interested in getting these
# properties from udevadm report
@ -53,6 +56,10 @@ SMBIOS_TYPES = {'bios': '0',
'memory_array': '16',
'memory_device': '17'}
# Device types
DISK = 'disk'
PARTITION = 'partition'
def parse_dmidecode(type):
"""Parses `dmidecode` output.
@ -252,28 +259,87 @@ def is_block_device(filepath):
return stat.S_ISBLK(mode)
def scsi_address_list():
    """Return SCSI addresses ('host:bus:target:lun') from /proc/scsi/sg.

    :returns: list of address strings; empty list when the sg interface
        is unavailable.
    """
    devices_path = '/proc/scsi/sg/devices'
    try:
        # The original leaked the file handle (open(...).read()).
        with open(devices_path) as f:
            scsi_devices = f.read().splitlines()
    except IOError:
        return []
    return [':'.join(dev.split()[:4]) for dev in scsi_devices]
def scsi_address(dev):
    """Return the SCSI address whose block device matches *dev*, or None.

    :param dev: device path, e.g. '/dev/sda'
    """
    for address in scsi_address_list():
        scsi_path = '/sys/class/scsi_device/%s/device/block/' % address
        names = os.listdir(scsi_path)
        # Guard against an empty block/ directory: the original indexed
        # [0] unconditionally and could raise IndexError.
        if names and dev == os.path.join('/dev', names[0]):
            return address
def get_block_devices_from_udev_db():
    """Return device paths of udev 'disk' devices without an LVM VG."""
    return get_block_data_from_udev('disk')
def get_partitions_from_udev_db():
    """Return device paths of udev 'partition' devices without an LVM VG."""
    return get_block_data_from_udev('partition')
def get_vg_devices_from_udev_db():
    """Return device paths of udev 'disk' devices that carry DM_VG_NAME."""
    return get_block_data_from_udev('disk', vg=True)
def _is_valid_dev_type(device_info, vg):
"""Returns bool value if we should use device based on different rules:
1. Should have approved MAJOR number
2. Shouldn't be nbd/ram/loop device
3. Should contain DEVNAME itself
4. Should be compared with vg value
:param device_info: A dict of properties which we get from udevadm.
:param vg: determine if we need LVM devices or not.
:returns: bool if we should use this device.
"""
if (
'E: MAJOR' in device_info and
int(device_info['E: MAJOR']) not in VALID_MAJORS
):
# NOTE(agordeev): filter out cd/dvd drives and other
# block devices in which bareon aren't interested
return False
if any(
os.path.basename(device_info['E: DEVNAME']).startswith(n)
for n in ('nbd', 'ram', 'loop')
):
return False
if 'E: DEVNAME' not in device_info:
return False
if (vg and 'E: DM_VG_NAME' in device_info or
not vg and 'E: DM_VG_NAME' not in device_info):
return True
else:
return False
def get_block_data_from_udev(devtype, vg=False):
    """Collect DEVNAMEs of block devices of *devtype* from the udev db.

    NOTE(review): this span was diff residue — the old inline
    line-scanning loop was interleaved with the new _is_valid_dev_type
    based filtering; only the new implementation is kept.

    :param devtype: udev DEVTYPE to select, 'disk' or 'partition'
    :param vg: when True, select only devices with an LVM VG name
    :returns: list of device paths
    """
    devs = []
    output = utils.execute('udevadm', 'info', '--export-db')[0]
    for device in output.split('\n\n'):
        # NOTE(agordeev): add only disks or their partitions
        if 'SUBSYSTEM=block' in device and 'DEVTYPE=%s' % devtype in device:
            # python 2.6 do not support dict comprehension
            device_info = dict((line.partition('=')[0],
                                line.partition('=')[2])
                               for line in device.split('\n')
                               if line.startswith('E:'))
            if _is_valid_dev_type(device_info, vg):
                devs.append(device_info['E: DEVNAME'])
    return devs
@ -297,34 +363,56 @@ def list_block_devices(disks=True):
# find all block devices recognized by kernel.
devs = get_block_devices_from_udev_db()
for device in devs:
try:
uspec = udevreport(device)
espec = extrareport(device)
bspec = blockdevreport(device)
except (KeyError, ValueError, TypeError,
errors.ProcessExecutionError) as e:
LOG.warning('Skipping block device %s. '
'Failed to get all information about the device: %s',
device, e)
continue
# if device is not disk, skip it
if disks and not is_disk(device, bspec=bspec, uspec=uspec):
continue
bdev = get_device_info(device, disks)
if bdev:
bdevs.append(bdev)
bdev = {
'device': device,
# NOTE(agordeev): blockdev gets 'startsec' from sysfs,
# 'size' is determined by ioctl call.
# This data was not actually used by bareon,
# so it can be removed without side effects.
'uspec': uspec,
'bspec': bspec,
'espec': espec
}
bdevs.append(bdev)
return bdevs
def get_device_ids(device):
    """Return the persistent 'disk/by-*' link suffixes for *device*.

    :param device: device path, e.g. '/dev/sda'
    :returns: dict {'name': device, 'paths': [...]}, or None when the
        udev report carries no DEVLINKS.
    """
    uspec = udevreport(device)
    if 'DEVLINKS' not in uspec:
        return None
    matches = (re.search(r'disk/by.*', link) for link in uspec['DEVLINKS'])
    paths = [m.group(0) for m in matches if m]
    return {'name': device, 'paths': paths}
def get_device_info(device, disks=True):
    """Gather udev/extra/blockdev reports for one block device.

    :param device: device path, e.g. '/dev/sda'
    :param disks: when True, skip devices that is_disk() rejects
    :returns: dict with 'device', 'uspec', 'bspec', 'espec' keys, or
        None when the device is filtered out or cannot be introspected.
    """
    try:
        uspec = udevreport(device)
        espec = extrareport(device)
        bspec = blockdevreport(device)
    except (KeyError, ValueError, TypeError,
            errors.ProcessExecutionError) as e:
        # Best effort: a device we cannot introspect is skipped, not fatal.
        LOG.warning('Skipping block device %s. '
                    'Failed to get all information about the device: %s',
                    device, e)
        return
    # if device is not disk, skip it
    if disks and not is_disk(device, bspec=bspec, uspec=uspec):
        return
    bdev = {
        'device': device,
        # NOTE(agordeev): blockdev gets 'startsec' from sysfs,
        # 'size' is determined by ioctl call.
        # This data was not actually used by bareon,
        # so it can be removed without side effects.
        'uspec': uspec,
        'bspec': bspec,
        'espec': espec
    }
    return bdev
def match_device(uspec1, uspec2):
"""Tries to find out if uspec1 and uspec2 are uspecs from the same device

View File

@ -21,8 +21,8 @@ from bareon.utils import utils
LOG = logging.getLogger(__name__)
def parse_partition_info(output):
lines = output.split('\n')
def parse_partition_info(parted_output):
lines = parted_output.split('\n')
generic_params = lines[1].rstrip(';').split(':')
generic = {
'dev': generic_params[0],
@ -38,24 +38,48 @@ def parse_partition_info(output):
if not line:
continue
part_params = line.split(':')
parts.append({
part = {
'disk_dev': generic['dev'],
'name': "%s%s" % (generic['dev'], int(part_params[0])),
'num': int(part_params[0]),
'begin': utils.parse_unit(part_params[1], 'MiB'),
'end': utils.parse_unit(part_params[2], 'MiB'),
'size': utils.parse_unit(part_params[3], 'MiB'),
'fstype': part_params[4] or None
})
'fstype': part_params[4] or None,
'type': None,
'flags': []
}
if part['fstype'] != 'free':
part['type'] = part_params[5] or None
part['flags'] = [f for f in part_params[6].split(', ') if f]
parts.append(part)
return {'generic': generic, 'parts': parts}
def info(dev):
    """Build a partition report for *dev* from parted, file and blkid.

    :param dev: disk device path, e.g. '/dev/sda'
    :returns: dict with 'generic' disk info (including 'has_bootloader')
        and a 'parts' list, each part carrying a 'uuid' from blkid.
    """
    utils.udevadm_settle()
    parted_output = utils.execute('parted', '-s', dev, '-m',
                                  'unit', 'MiB',
                                  'print free',
                                  check_exit_code=[0])[0]
    LOG.debug('Parted info output: \n%s' % parted_output)
    result = parse_partition_info(parted_output)
    file_output = utils.execute('file', '-sk', dev,
                                check_exit_code=[0])[0]
    LOG.debug('File info output: \n%s' % file_output)
    result['generic']['has_bootloader'] = 'boot sector' in file_output
    for part in result['parts']:
        # BUG FIX: the options were passed as one argv element
        # ('blkid -s UUID -o value'), which cannot resolve to an
        # executable; pass them separately, as get_uuid() does.
        blkid_output = utils.execute('blkid', '-s', 'UUID', '-o', 'value',
                                     part['name'],
                                     check_exit_code=False)[0].strip()
        LOG.debug('Blkid output: \n%s' % blkid_output)
        part['uuid'] = blkid_output
    LOG.debug('Info result: %s' % result)
    return result
@ -192,3 +216,8 @@ def reread_partitions(dev, out='Device or resource busy', timeout=60):
out, err = utils.execute('partprobe', dev, check_exit_code=[0, 1])
LOG.debug('Partprobe output: \n%s' % out)
utils.udevadm_settle()
def get_uuid(device):
    """Return the filesystem UUID blkid reports for *device*."""
    stdout = utils.execute('blkid', '-o', 'value', '-s', 'UUID', device,
                           check_exit_code=[0])[0]
    return stdout.strip()

View File

@ -14,6 +14,7 @@
import copy
import hashlib
import json
import locale
import math
import os
@ -23,6 +24,7 @@ import socket
import subprocess
import time
import difflib
import jinja2
from oslo_config import cfg
import requests
@ -162,14 +164,22 @@ def B2MiB(b, ceil=True):
return int(math.floor(float(b) / 1024 / 1024))
def get_driver(namespace, name):
    """Load a stevedore driver *name* from entry-point *namespace*.

    NOTE(review): residue kept both the old one-argument signature line
    and the new two-argument one; the new signature is kept.
    """
    LOG.debug('Trying to get driver: %s', name)
    driver = stevedore.driver.DriverManager(
        namespace=namespace, name=name).driver
    LOG.debug('Found driver: %s', driver.__name__)
    return driver
def get_deploy_driver(name):
    """Load *name* from the 'bareon.drivers.deploy' entry-point namespace."""
    return get_driver('bareon.drivers.deploy', name)
def get_data_driver(name):
    """Load *name* from the 'bareon.drivers.data' entry-point namespace."""
    return get_driver('bareon.drivers.data', name)
def render_and_save(tmpl_dir, tmpl_names, tmpl_data, file_name):
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_dir))
template = env.get_or_select_template(tmpl_names)
@ -339,10 +349,6 @@ def unblacklist_udev_rules(udev_rules_dir, udev_rename_substr):
udevadm_settle()
def udevadm_settle():
execute('udevadm', 'settle', '--quiet', check_exit_code=[0])
def parse_kernel_cmdline():
"""Parse linux kernel command line"""
with open('/proc/cmdline', 'rt') as f:
@ -372,6 +378,30 @@ def get_interface_ip(mac_addr):
return match.group(1)
def udevadm_settle():
    """Block until udev has processed all queued events."""
    execute('udevadm', 'settle', check_exit_code=[0])
def dict_diff(dict1, dict2, sfrom="from", sto="to"):
    """Return a unified diff of two dicts rendered as indented JSON."""
    return text_diff(json.dumps(dict1, indent=2),
                     json.dumps(dict2, indent=2),
                     sfrom=sfrom, sto=sto)
def text_diff(text1, text2, sfrom="from", sto="to"):
    """Return a unified diff of two texts.

    Blank lines are dropped and each remaining line is stripped of
    surrounding whitespace before comparison.
    """
    def significant_lines(text):
        return [line.strip() for line in text.splitlines() if line.strip()]

    diff = difflib.unified_diff(significant_lines(text1),
                                significant_lines(text2),
                                fromfile=sfrom,
                                tofile=sto,
                                lineterm="")
    return "\n".join(diff)
def list_opts():
"""Returns a list of oslo.config options available in the library.

View File

@ -11,6 +11,9 @@ classifier =
[files]
packages =
bareon
extra_files =
bareon/drivers/data/json_schemes/ironic.json
bareon/drivers/data/json_schemes/nailgun.json
[entry_points]
console_scripts =
@ -23,12 +26,17 @@ console_scripts =
bareon-ironic-callback = bareon.cmd.ironic_callback:main
bareon-mkbootstrap = bareon.cmd.agent:mkbootstrap
bareon.drivers =
nailgun = bareon.drivers.nailgun:Nailgun
nailgun_simple = bareon.drivers.simple:NailgunSimpleDriver
nailgun_build_image = bareon.drivers.nailgun:NailgunBuildImage
ironic = bareon.drivers.nailgun:Ironic
bootstrap_build_image = bareon.drivers.bootstrap:BootstrapBuildImage
bareon.drivers.data =
nailgun = bareon.drivers.data.nailgun:Nailgun
nailgun_simple = bareon.drivers.data.simple:NailgunSimpleDriver
nailgun_build_image = bareon.drivers.data.nailgun:NailgunBuildImage
ironic = bareon.drivers.data.ironic:Ironic
bootstrap_build_image = bareon.bootstrap:BootstrapBuildImage
bareon.drivers.deploy =
nailgun = bareon.drivers.deploy.nailgun:Manager
swift = bareon.drivers.deploy.swift:Swift
rsync = bareon.drivers.deploy.rsync:Rsync
oslo.config.opts =
bareon.manager = bareon.manager:list_opts