diff --git a/doc/source/conf.py b/doc/source/conf.py new file mode 100644 index 0000000..1787335 --- /dev/null +++ b/doc/source/conf.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +# + +# -- General configuration ---------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc', + 'sphinx.ext.viewcode', + 'oslosphinx', + ] + +wsme_protocols = ['restjson'] + +# autodoc generation is a bit aggressive and a nuisance when doing heavy +# text edit cycles. +# execute "export SPHINX_DEBUG=1" in your terminal to disable + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'Ironic Lib' +copyright = u'OpenStack Foundation' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +from ironic_lib import version as il_version +# The full version, including alpha/beta/rc tags. +release = il_version.version_info.release_string() +# The short X.Y version. +version = il_version.version_info.version_string() + +# A list of ignored prefixes for module index sorting. +modindex_common_prefix = ['ironic_lib'] + +# If true, '()' will be appended to :func: etc. cross-reference text. +add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +add_module_names = True + +# The name of the Pygments (syntax highlighting) style to use. 
+pygments_style = 'sphinx' + +# -- Options for HTML output -------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +#html_theme_path = ["."] +#html_theme = '_theme' +#html_static_path = ['_static'] + +# Output file base name for HTML help builder. +htmlhelp_basename = '%sdoc' % project + + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass +# [howto/manual]). +latex_documents = [ + ( + 'index', + '%s.tex' % project, + u'%s Documentation' % project, + u'OpenStack Foundation', + 'manual' + ), +] diff --git a/doc/source/index.rst b/doc/source/index.rst new file mode 100644 index 0000000..973d158 --- /dev/null +++ b/doc/source/index.rst @@ -0,0 +1,88 @@ +====================== +Welcome to Ironic-lib! +====================== + +Overview +======== + +Ironic-lib is a library for use by projects under Bare Metal governance only. +This documentation is intended for developer use only. If you are looking for +documentation for deployers, please see the +`ironic documentation `_. + +Metrics +======= + +Ironic-lib provides a pluggable metrics library as of the 2.0.0 release. +Current provided backends are the default, 'noop', which discards all data, +and 'statsd', which emits metrics to a statsd daemon over the network. The +metrics backend to be used is configured via ``CONF.metrics.backend``. How +this configuration is set in practice may vary by project. + +The typical usage of metrics is to initialize and cache a metrics logger, +using the `get_metrics_logger()` method in `ironic_lib.metrics_utils`, then +use that object to decorate functions or create context managers to gather +metrics. The general convention is to provide the name of the module as the +first argument to set it as the prefix, then set the actual metric name to the +method name. 
For example:: + + from ironic_lib import metrics_utils + + METRICS = metrics_utils.get_metrics_logger(__name__) + + @METRICS.timer('my_simple_method') + def my_simple_method(arg, matey): + pass + + def my_complex_method(arg, matey): + with METRICS.timer('complex_method_pt_1'): + do_some_work() + + with METRICS.timer('complex_method_pt_2'): + do_more_work() + +There are three different kinds of metrics: + - **Timers** measure how long the code in the decorated method or context + manager takes to execute, and emits the value as a timer metric. These + are useful for measuring performance of a given block of code. + - **Counters** increment a counter each time a decorated method or context + manager is executed. These are useful for counting the number of times a + method is called, or the number of times an event occurs. + - **Gauges** return the value of a decorated method as a metric. This is + useful when you want to monitor the value returned by a method over time. + +Additionally, metrics can be sent directly, rather than using a context +manager or decorator, when appropriate. When used in this way, ironic-lib will +simply emit the value provided as the requested metric type. For example:: + + from ironic_lib import metrics_utils + + METRICS = metrics_utils.get_metrics_logger(__name__) + + def my_node_failure_method(node): + if node.failed: + METRICS.send_counter(node.uuid, 1) + +The provided statsd backend natively supports all three metric types. For more +information about how statsd changes behavior based on the metric type, see +`statsd metric types `_ + + +Generated Developer Documentation +================================= + +.. 
toctree:: + :maxdepth: 1 + + api/autoindex + + +References +========== + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/etc/rootwrap.d/ironic-lib.filters b/etc/rootwrap.d/ironic-lib.filters index 0de9496..a9e26bb 100644 --- a/etc/rootwrap.d/ironic-lib.filters +++ b/etc/rootwrap.d/ironic-lib.filters @@ -13,6 +13,8 @@ blockdev: CommandFilter, blockdev, root hexdump: CommandFilter, hexdump, root qemu-img: CommandFilter, qemu-img, root wipefs: CommandFilter, wipefs, root +sgdisk: CommandFilter, sgdisk, root +partprobe: CommandFilter, partprobe, root # ironic_lib/utils.py mkswap: CommandFilter, mkswap, root diff --git a/ironic_lib/disk_utils.py b/ironic_lib/disk_utils.py index d7a0204..530afe0 100644 --- a/ironic_lib/disk_utils.py +++ b/ironic_lib/disk_utils.py @@ -67,6 +67,12 @@ LOG = logging.getLogger(__name__) _PARTED_PRINT_RE = re.compile(r"^(\d+):([\d\.]+)MiB:" "([\d\.]+)MiB:([\d\.]+)MiB:(\w*)::(\w*)") +CONFIGDRIVE_LABEL = "config-2" +MAX_CONFIG_DRIVE_SIZE_MB = 64 + +# Maximum disk size supported by MBR is 2TB (2 * 1024 * 1024 MB) +MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR = 2097152 + def list_partitions(device): """Get partitions information from given device. @@ -109,7 +115,7 @@ def get_disk_identifier(dev): http://www.syslinux.org/wiki/index.php/Comboot/chain.c32#mbr: :param dev: Path for the already populated disk device. - :returns The Disk Identifier. + :returns: The Disk Identifier. """ disk_identifier = utils.execute('hexdump', '-s', '440', '-n', '4', '-e', '''\"0x%08x\"''', @@ -434,9 +440,9 @@ def work_on_disk(dev, root_mb, swap_mb, ephemeral_mb, ephemeral_format, :returns: a dictionary containing the following keys: 'root uuid': UUID of root partition 'efi system partition uuid': UUID of the uefi system partition - (if boot mode is uefi). + (if boot mode is uefi). NOTE: If key exists but value is None, it means partition doesn't - exist. + exist. 
""" # the only way for preserve_ephemeral to be set to true is if we are # rebuilding an instance with --preserve_ephemeral. @@ -539,3 +545,221 @@ def work_on_disk(dev, root_mb, swap_mb, ephemeral_mb, ephemeral_format, def list_opts(): """Entry point for oslo-config-generator.""" return [('disk_utils', opts)] + + +def _is_disk_larger_than_max_size(device, node_uuid): + """Check if total disk size exceeds 2TB msdos limit + + :param device: device path. + :param node_uuid: node's uuid. Used for logging. + :raises: InstanceDeployFailure, if any disk partitioning related + commands fail. + :returns: True if total disk size exceeds 2TB. Returns False otherwise. + """ + try: + disksize_bytes = utils.execute('blockdev', '--getsize64', device, + use_standard_locale=True, + run_as_root=True) + except (processutils.UnknownArgumentError, + processutils.ProcessExecutionError, OSError) as e: + msg = (_('Failed to get size of disk %(disk)s for node %(node)s. ' + 'Error: %(error)s') % + {'disk': device, 'node': node_uuid, 'error': e}) + LOG.error(msg) + raise exception.InstanceDeployFailure(msg) + + disksize_mb = int(disksize_bytes) // 1024 // 1024 + + return disksize_mb > MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR + + +def _get_labelled_partition(device, label, node_uuid): + """Check and return if partition with given label exists + + :param device: The device path. + :param label: Partition label + :param node_uuid: UUID of the Node. Used for logging. + :raises: InstanceDeployFailure, if any disk partitioning related + commands fail. + :returns: block device file for partition if it exists; otherwise it + returns None. 
+ """ + try: + utils.execute('partprobe', device, run_as_root=True) + label_arg = 'LABEL=%s' % label + output, err = utils.execute('blkid', '-o', 'device', device, + '-t', label_arg, check_exit_code=[0, 2], + use_standard_locale=True, run_as_root=True) + except (processutils.UnknownArgumentError, + processutils.ProcessExecutionError, OSError) as e: + msg = (_('Failed to retrieve partition labels on disk %(disk)s ' + 'for node %(node)s. Error: %(error)s') % + {'disk': device, 'node': node_uuid, 'error': e}) + LOG.error(msg) + raise exception.InstanceDeployFailure(msg) + + if output: + if len(output.split()) > 1: + raise exception.InstanceDeployFailure( + _('More than one config drive exists on device %(device)s ' + 'for node %(node)s.') + % {'device': device, 'node': node_uuid}) + + return output.rstrip() + + +def _is_disk_gpt_partitioned(device, node_uuid): + """Checks if the disk is GPT partitioned + + :param device: The device path. + :param node_uuid: UUID of the Node. Used for logging. + :raises: InstanceDeployFailure, if any disk partitioning related + commands fail. + :param node_uuid: UUID of the Node + :returns: Boolean. Returns True if disk is GPT partitioned + """ + try: + output = utils.execute('blkid', '-p', '-o', 'value', '-s', 'PTTYPE', + device, use_standard_locale=True, + run_as_root=True) + except (processutils.UnknownArgumentError, + processutils.ProcessExecutionError, OSError) as e: + msg = (_('Failed to retrieve partition table type for disk %(disk)s ' + 'for node %(node)s. Error: %(error)s') % + {'disk': device, 'node': node_uuid, 'error': e}) + LOG.error(msg) + raise exception.InstanceDeployFailure(msg) + + return 'gpt' in output + + +def _fix_gpt_structs(device, node_uuid): + """Checks backup GPT data structures and moves them to end of the device + + :param device: The device path. + :param node_uuid: UUID of the Node. Used for logging. + :raises: InstanceDeployFailure, if any disk partitioning related + commands fail. 
+ """ + try: + output, err = utils.execute('partprobe', device, + use_standard_locale=True, + run_as_root=True) + + search_str = "fix the GPT to use all of the space" + if search_str in err: + utils.execute('sgdisk', '-e', device, run_as_root=True) + except (processutils.UnknownArgumentError, + processutils.ProcessExecutionError, OSError) as e: + msg = (_('Failed to fix GPT data structures on disk %(disk)s ' + 'for node %(node)s. Error: %(error)s') % + {'disk': device, 'node': node_uuid, 'error': e}) + LOG.error(msg) + raise exception.InstanceDeployFailure(msg) + + +def create_config_drive_partition(node_uuid, device, configdrive): + """Create a partition for config drive + + Checks if the device is GPT or MBR partitioned and creates config drive + partition accordingly. + + :param node_uuid: UUID of the Node. + :param device: The device path. + :param configdrive: Base64 encoded Gzipped configdrive content or + configdrive HTTP URL. + :raises: InstanceDeployFailure if config drive size exceeds maximum limit + or if it fails to create config drive. + """ + confdrive_file = None + try: + config_drive_part = _get_labelled_partition(device, + CONFIGDRIVE_LABEL, + node_uuid) + + confdrive_mb, confdrive_file = _get_configdrive(configdrive, + node_uuid) + if confdrive_mb > MAX_CONFIG_DRIVE_SIZE_MB: + raise exception.InstanceDeployFailure( + _('Config drive size exceeds maximum limit of 64MiB. 
' + 'Size of the given config drive is %(size)d MiB for ' + 'node %(node)s.') + % {'size': confdrive_mb, 'node': node_uuid}) + + LOG.debug("Adding config drive partition %(size)d MiB to " + "device: %(dev)s for node %(node)s", + {'dev': device, 'size': confdrive_mb, 'node': node_uuid}) + + if config_drive_part: + LOG.debug("Configdrive for node %(node)s exists at " + "%(part)s", + {'node': node_uuid, 'part': config_drive_part}) + else: + cur_parts = set(part['number'] for part in list_partitions(device)) + + if _is_disk_gpt_partitioned(device, node_uuid): + _fix_gpt_structs(device, node_uuid) + create_option = '0:-%dMB:0' % MAX_CONFIG_DRIVE_SIZE_MB + utils.execute('sgdisk', '-n', create_option, device, + run_as_root=True) + else: + # Check if the disk has 4 partitions. The MBR based disk + # cannot have more than 4 partitions. + # TODO(stendulker): One can use logical partitions to create + # a config drive if there are 4 primary partitions. + # https://bugs.launchpad.net/ironic/+bug/1561283 + num_parts = len(list_partitions(device)) + if num_parts > 3: + raise exception.InstanceDeployFailure( + _('Config drive cannot be created for node %(node)s. ' + 'Disk uses MBR partitioning and already has ' + '%(parts)d primary partitions.') + % {'node': node_uuid, 'parts': num_parts}) + + # Check if disk size exceeds 2TB msdos limit + startlimit = '-%dMiB' % MAX_CONFIG_DRIVE_SIZE_MB + endlimit = '-0' + if _is_disk_larger_than_max_size(device, node_uuid): + # Need to create a small partition at 2TB limit + LOG.warning(_LW("Disk size is larger than 2TB for " + "node %(node)s. 
Creating config drive "
+                                    "at the end of the disk %(disk)s."),
+                                {'node': node_uuid, 'disk': device})
+                    startlimit = (MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR -
+                                  MAX_CONFIG_DRIVE_SIZE_MB - 1)
+                    endlimit = MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR - 1
+
+                utils.execute('parted', '-a', 'optimal', '-s', '--', device,
+                              'mkpart', 'primary', 'ext2', startlimit,
+                              endlimit, run_as_root=True)
+
+            upd_parts = set(part['number'] for part in list_partitions(device))
+            new_part = set(upd_parts) - set(cur_parts)
+            if len(new_part) != 1:
+                raise exception.InstanceDeployFailure(
+                    _('Disk partitioning failed on device %(device)s. '
+                      'Unable to retrieve config drive partition information.')
+                    % {'device': device})
+
+            if is_iscsi_device(device, node_uuid):
+                config_drive_part = '%s-part%s' % (device, new_part.pop())
+            else:
+                config_drive_part = '%s%s' % (device, new_part.pop())
+
+            dd(confdrive_file, config_drive_part)
+            LOG.info(_LI("Configdrive for node %(node)s successfully "
+                         "copied onto partition %(part)s"),
+                     {'node': node_uuid, 'part': config_drive_part})
+
+    except (processutils.UnknownArgumentError,
+            processutils.ProcessExecutionError, OSError) as e:
+        msg = (_('Failed to create config drive on disk %(disk)s '
+                 'for node %(node)s. 
Error: %(error)s') % + {'disk': device, 'node': node_uuid, 'error': e}) + LOG.error(msg) + raise exception.InstanceDeployFailure(msg) + finally: + # If the configdrive was requested make sure we delete the file + # after copying the content to the partition + if confdrive_file: + utils.unlink_without_raise(confdrive_file) diff --git a/ironic_lib/exception.py b/ironic_lib/exception.py index e3f65a1..de4e041 100644 --- a/ironic_lib/exception.py +++ b/ironic_lib/exception.py @@ -26,6 +26,7 @@ import logging import six from oslo_config import cfg +from oslo_utils import excutils from ironic_lib.common.i18n import _ from ironic_lib.common.i18n import _LE @@ -70,18 +71,18 @@ class IronicException(Exception): try: message = self.message % kwargs - except Exception as e: - # kwargs doesn't match a variable in the message - # log the issue and the kwargs - LOG.exception(_LE('Exception in string format operation')) - for name, value in kwargs.items(): - LOG.error("%s: %s" % (name, value)) - - if CONF.ironic_lib.fatal_exception_format_errors: - raise e - else: - # at least get the core message out if something happened - message = self.message + except Exception: + with excutils.save_and_reraise_exception() as ctxt: + # kwargs doesn't match a variable in the message + # log the issue and the kwargs + prs = ', '.join('%s=%s' % pair for pair in kwargs.items()) + LOG.exception(_LE('Exception in string format operation ' + '(arguments %s)'), prs) + if not CONF.ironic_lib.fatal_exception_format_errors: + # at least get the core message out if something + # happened + message = self.message + ctxt.reraise = False super(IronicException, self).__init__(message) diff --git a/ironic_lib/metrics.py b/ironic_lib/metrics.py index 20c8532..889ca33 100644 --- a/ironic_lib/metrics.py +++ b/ironic_lib/metrics.py @@ -26,18 +26,20 @@ from ironic_lib.common.i18n import _ class Timer(object): """A timer decorator and context manager. - It is bound to this MetricLogger. 
For example: + This metric type times the decorated method or code running inside the + context manager, and emits the time as the metric value. It is bound to + this MetricLogger. For example:: - from ironic_lib import metrics + from ironic_lib import metrics_utils - METRICS = metrics.get_metrics_logger() + METRICS = metrics_utils.get_metrics_logger() - @METRICS.timer('foo') - def foo(bar, baz): - print bar, baz + @METRICS.timer('foo') + def foo(bar, baz): + print bar, baz - with METRICS.timer('foo'): - do_something() + with METRICS.timer('foo'): + do_something() """ def __init__(self, metrics, name): """Init the decorator / context manager. @@ -78,18 +80,20 @@ class Timer(object): class Counter(object): """A counter decorator and context manager. - It is bound to this MetricLogger. For example: + This metric type increments a counter every time the decorated method or + context manager is executed. It is bound to this MetricLogger. For + example:: - from ironic_lib import metrics + from ironic_lib import metrics_utils - METRICS = metrics.get_metrics_logger() + METRICS = metrics_utils.get_metrics_logger() - @METRICS.counter('foo') - def foo(bar, baz): - print bar, baz + @METRICS.counter('foo') + def foo(bar, baz): + print bar, baz - with METRICS.counter('foo'): - do_something() + with METRICS.counter('foo'): + do_something() """ def __init__(self, metrics, name, sample_rate): """Init the decorator / context manager. @@ -135,18 +139,17 @@ class Counter(object): class Gauge(object): """A gauge decorator. - It is bound to this MetricLogger. For example: + This metric type returns the value of the decorated method as a metric + every time the method is executed. It is bound to this MetricLogger. 
For + example:: - from ironic_lib import metrics + from ironic_lib import metrics_utils - METRICS = metrics.get_metrics_logger() + METRICS = metrics_utils.get_metrics_logger() - @METRICS.gauge('foo') - def foo(bar, baz): - print bar, baz - - with METRICS.gauge('foo'): - do_something() + @METRICS.gauge('foo') + def add_foo(bar, baz): + return (bar + baz) """ def __init__(self, metrics, name): """Init the decorator / context manager. @@ -184,15 +187,15 @@ class MetricLogger(object): The data can be a gauge, a counter, or a timer. The data sent to the backend is composed of: - - a full metric name - - a numeric value + - a full metric name + - a numeric value The format of the full metric name is: _prefixname where: - _prefix: [global_prefix][uuid][host_name]prefix - name: the name of this metric - : the delimiter. Default is '.' + - _prefix: [global_prefix][uuid][host_name]prefix + - name: the name of this metric + - : the delimiter. Default is '.' """ def __init__(self, prefix='', delimiter='.'): @@ -211,9 +214,11 @@ class MetricLogger(object): The format of the full metric name is: _prefixname where: - _prefix: [global_prefix][uuid][host_name]prefix - name: the name of this metric - : the delimiter. Default is '.' + - _prefix: [global_prefix][uuid][host_name] + prefix + - name: the name of this metric + - : the delimiter. Default is '.' + :param name: The metric name. :return: The full metric name, with logger prefix, as a string. @@ -240,7 +245,7 @@ class MetricLogger(object): The backend will increment the counter 'name' by the value 'value'. 
Optionally, specify sample_rate in the interval [0.0, 1.0] to - sample data probabilistically where: + sample data probabilistically where:: P(send metric data) = sample_rate diff --git a/ironic_lib/tests/test_disk_utils.py b/ironic_lib/tests/test_disk_utils.py index 6336f71..20c3434 100644 --- a/ironic_lib/tests/test_disk_utils.py +++ b/ironic_lib/tests/test_disk_utils.py @@ -671,3 +671,572 @@ class OtherFunctionTestCase(test_base.BaseTestCase): return_value=mb + 1) self.assertEqual(2, disk_utils.get_image_mb('x', False)) self.assertEqual(2, disk_utils.get_image_mb('x', True)) + + +@mock.patch.object(utils, 'execute') +class WholeDiskPartitionTestCases(test_base.BaseTestCase): + + def setUp(self): + super(WholeDiskPartitionTestCases, self).setUp() + self.dev = "/dev/fake" + self.config_part_label = "config-2" + self.node_uuid = "12345678-1234-1234-1234-1234567890abcxyz" + + def test_get_partition_present(self, mock_execute): + blkid_output = '/dev/fake12\n' + mock_execute.side_effect = [(None, ''), (blkid_output, '')] + result = disk_utils._get_labelled_partition(self.dev, + self.config_part_label, + self.node_uuid) + self.assertEqual(blkid_output.rstrip(), result) + execute_calls = [ + mock.call('partprobe', self.dev, run_as_root=True), + mock.call('blkid', '-o', 'device', self.dev, '-t', + 'LABEL=config-2', check_exit_code=[0, 2], + use_standard_locale=True, run_as_root=True) + ] + mock_execute.assert_has_calls(execute_calls) + + def test_get_partition_absent(self, mock_execute): + mock_execute.side_effect = [(None, ''), + (None, '')] + result = disk_utils._get_labelled_partition(self.dev, + self.config_part_label, + self.node_uuid) + self.assertEqual(None, result) + execute_calls = [ + mock.call('partprobe', self.dev, run_as_root=True), + mock.call('blkid', '-o', 'device', self.dev, '-t', + 'LABEL=config-2', check_exit_code=[0, 2], + use_standard_locale=True, run_as_root=True) + ] + mock_execute.assert_has_calls(execute_calls) + + def 
test_get_partition_DeployFail_exc(self, mock_execute): + blkid_output = '/dev/fake12\n/dev/fake13\n' + mock_execute.side_effect = [(None, ''), (blkid_output, '')] + self.assertRaises(exception.InstanceDeployFailure, + disk_utils._get_labelled_partition, self.dev, + self.config_part_label, self.node_uuid) + execute_calls = [ + mock.call('partprobe', self.dev, run_as_root=True), + mock.call('blkid', '-o', 'device', self.dev, '-t', + 'LABEL=config-2', check_exit_code=[0, 2], + use_standard_locale=True, run_as_root=True) + ] + mock_execute.assert_has_calls(execute_calls) + + @mock.patch.object(disk_utils.LOG, 'error') + def test_get_partition_exc(self, mock_log, mock_execute): + mock_execute.side_effect = processutils.ProcessExecutionError + self.assertRaisesRegex(exception.InstanceDeployFailure, + 'Failed to retrieve partition labels', + disk_utils._get_labelled_partition, self.dev, + self.config_part_label, self.node_uuid) + mock_execute.assert_called_once_with('partprobe', self.dev, + run_as_root=True) + self.assertEqual(1, mock_log.call_count) + + def _test_is_disk_larger_than_max_size(self, mock_execute, blk_out): + mock_execute.return_value = blk_out + result = disk_utils._is_disk_larger_than_max_size(self.dev, + self.node_uuid) + mock_execute.assert_called_once_with('blockdev', '--getsize64', + '/dev/fake', run_as_root=True, + use_standard_locale=True) + return result + + def test_is_disk_larger_than_max_size_false(self, mock_execute): + blkid_out = "53687091200" + ret = self._test_is_disk_larger_than_max_size(mock_execute, + blk_out=blkid_out) + self.assertFalse(ret) + + def test_is_disk_larger_than_max_size_true(self, mock_execute): + blkid_out = "4398046511104" + ret = self._test_is_disk_larger_than_max_size(mock_execute, + blk_out=blkid_out) + self.assertTrue(ret) + + @mock.patch.object(disk_utils.LOG, 'error') + def test_is_disk_larger_than_max_size_exc(self, mock_log, mock_execute): + mock_execute.side_effect = processutils.ProcessExecutionError + 
self.assertRaisesRegex(exception.InstanceDeployFailure, + 'Failed to get size of disk', + disk_utils._is_disk_larger_than_max_size, + self.dev, self.node_uuid) + mock_execute.assert_called_once_with('blockdev', '--getsize64', + '/dev/fake', run_as_root=True, + use_standard_locale=True) + self.assertEqual(1, mock_log.call_count) + + def test__is_disk_gpt_partitioned_true(self, mock_execute): + blkid_output = 'gpt' + mock_execute.return_value = (blkid_output, '') + result = disk_utils._is_disk_gpt_partitioned('/dev/fake', + self.node_uuid) + self.assertTrue(result) + mock_execute.assert_called_once_with('blkid', '-p', '-o', 'value', + '-s', 'PTTYPE', '/dev/fake', + use_standard_locale=True, + run_as_root=True) + + def test_is_disk_gpt_partitioned_false(self, mock_execute): + blkid_output = 'dos' + mock_execute.return_value = (blkid_output, '') + result = disk_utils._is_disk_gpt_partitioned('/dev/fake', + self.node_uuid) + self.assertFalse(result) + mock_execute.assert_called_once_with('blkid', '-p', '-o', 'value', + '-s', 'PTTYPE', '/dev/fake', + use_standard_locale=True, + run_as_root=True) + + @mock.patch.object(disk_utils.LOG, 'error') + def test_is_disk_gpt_partitioned_exc(self, mock_log, mock_execute): + mock_execute.side_effect = processutils.ProcessExecutionError + self.assertRaisesRegex(exception.InstanceDeployFailure, + 'Failed to retrieve partition table type', + disk_utils._is_disk_gpt_partitioned, + self.dev, self.node_uuid) + mock_execute.assert_called_once_with('blkid', '-p', '-o', 'value', + '-s', 'PTTYPE', '/dev/fake', + use_standard_locale=True, + run_as_root=True) + self.assertEqual(1, mock_log.call_count) + + def test_fix_gpt_structs_fix_required(self, mock_execute): + partprobe_err = """ +Error: The backup GPT table is not at the end of the disk, as it should be. +This might mean that another operating system believes the disk is smaller. +Fix, by moving the backup to the end (and removing the old backup)? 
+Warning: Not all of the space available to /dev/sdb appears to be used, +you can fix the GPT to use all of the space (an extra 581456476 blocks) +or continue with the current setting? +""" + mock_execute.return_value = ('', partprobe_err) + execute_calls = [ + mock.call('partprobe', '/dev/fake', use_standard_locale=True, + run_as_root=True), + mock.call('sgdisk', '-e', '/dev/fake', run_as_root=True) + ] + disk_utils._fix_gpt_structs('/dev/fake', self.node_uuid) + mock_execute.assert_has_calls(execute_calls) + + def test_fix_gpt_structs_fix_not_required(self, mock_execute): + mock_execute.return_value = ('', '') + + disk_utils._fix_gpt_structs('/dev/fake', self.node_uuid) + mock_execute.assert_called_once_with('partprobe', '/dev/fake', + use_standard_locale=True, + run_as_root=True) + + @mock.patch.object(disk_utils.LOG, 'error') + def test_fix_gpt_structs_exc(self, mock_log, mock_execute): + mock_execute.side_effect = processutils.ProcessExecutionError + self.assertRaisesRegex(exception.InstanceDeployFailure, + 'Failed to fix GPT data structures on disk', + disk_utils._fix_gpt_structs, + self.dev, self.node_uuid) + mock_execute.assert_called_once_with('partprobe', '/dev/fake', + use_standard_locale=True, + run_as_root=True) + self.assertEqual(1, mock_log.call_count) + + +class WholeDiskConfigDriveTestCases(test_base.BaseTestCase): + + def setUp(self): + super(WholeDiskConfigDriveTestCases, self).setUp() + self.dev = "/dev/fake" + self.config_part_label = "config-2" + self.node_uuid = "12345678-1234-1234-1234-1234567890abcxyz" + + @mock.patch.object(utils, 'execute', autospec=True) + @mock.patch.object(utils, 'unlink_without_raise', + autospec=True) + @mock.patch.object(disk_utils, 'dd', + autospec=True) + @mock.patch.object(disk_utils, '_fix_gpt_structs', + autospec=True) + @mock.patch.object(disk_utils, '_is_disk_gpt_partitioned', + autospec=True) + @mock.patch.object(disk_utils, 'list_partitions', + autospec=True) + @mock.patch.object(disk_utils, 
'_get_labelled_partition', + autospec=True) + @mock.patch.object(disk_utils, '_get_configdrive', + autospec=True) + def test_create_partition_exists(self, mock_get_configdrive, + mock_get_labelled_partition, + mock_list_partitions, + mock_is_disk_gpt, mock_fix_gpt, + mock_dd, mock_unlink, mock_execute): + config_url = 'http://1.2.3.4/cd' + configdrive_part = '/dev/fake-part1' + configdrive_file = '/tmp/xyz' + configdrive_mb = 10 + + mock_get_labelled_partition.return_value = configdrive_part + mock_get_configdrive.return_value = (configdrive_mb, configdrive_file) + disk_utils.create_config_drive_partition(self.node_uuid, self.dev, + config_url) + mock_get_configdrive.assert_called_with(config_url, self.node_uuid) + mock_get_labelled_partition.assert_called_with(self.dev, + self.config_part_label, + self.node_uuid) + self.assertFalse(mock_list_partitions.called) + self.assertFalse(mock_is_disk_gpt.called) + self.assertFalse(mock_execute.called) + mock_dd.assert_called_with(configdrive_file, configdrive_part) + mock_unlink.assert_called_with(configdrive_file) + + @mock.patch.object(utils, 'execute', autospec=True) + @mock.patch.object(utils, 'unlink_without_raise', + autospec=True) + @mock.patch.object(disk_utils, 'dd', + autospec=True) + @mock.patch.object(disk_utils, '_fix_gpt_structs', + autospec=True) + @mock.patch.object(disk_utils, '_is_disk_gpt_partitioned', + autospec=True) + @mock.patch.object(disk_utils, 'list_partitions', + autospec=True) + @mock.patch.object(disk_utils, '_get_labelled_partition', + autospec=True) + @mock.patch.object(disk_utils, '_get_configdrive', + autospec=True) + def test_create_partition_gpt(self, mock_get_configdrive, + mock_get_labelled_partition, + mock_list_partitions, + mock_is_disk_gpt, mock_fix_gpt, + mock_dd, mock_unlink, mock_execute): + config_url = 'http://1.2.3.4/cd' + configdrive_file = '/tmp/xyz' + configdrive_mb = 10 + + initial_partitions = [{'end': 49152, 'number': 1, 'start': 1, + 'flags': 'boot', 'filesystem': 
'ext4', + 'size': 49151}, + {'end': 51099, 'number': 3, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}, + {'end': 51099, 'number': 5, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}] + updated_partitions = [{'end': 49152, 'number': 1, 'start': 1, + 'flags': 'boot', 'filesystem': 'ext4', + 'size': 49151}, + {'end': 51099, 'number': 3, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}, + {'end': 51099, 'number': 4, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}, + {'end': 51099, 'number': 5, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}] + + mock_get_configdrive.return_value = (configdrive_mb, configdrive_file) + mock_get_labelled_partition.return_value = None + + mock_is_disk_gpt.return_value = True + mock_list_partitions.side_effect = [initial_partitions, + updated_partitions] + expected_part = '/dev/fake4' + disk_utils.create_config_drive_partition(self.node_uuid, self.dev, + config_url) + mock_execute.assert_called_with('sgdisk', '-n', '0:-64MB:0', + self.dev, run_as_root=True) + self.assertEqual(2, mock_list_partitions.call_count) + mock_is_disk_gpt.assert_called_with(self.dev, self.node_uuid) + mock_fix_gpt.assert_called_with(self.dev, self.node_uuid) + mock_dd.assert_called_with(configdrive_file, expected_part) + mock_unlink.assert_called_with(configdrive_file) + + @mock.patch.object(utils, 'execute', autospec=True) + @mock.patch.object(disk_utils.LOG, 'warning') + @mock.patch.object(utils, 'unlink_without_raise', + autospec=True) + @mock.patch.object(disk_utils, 'dd', + autospec=True) + @mock.patch.object(disk_utils, '_is_disk_larger_than_max_size', + autospec=True) + @mock.patch.object(disk_utils, '_fix_gpt_structs', + autospec=True) + @mock.patch.object(disk_utils, '_is_disk_gpt_partitioned', + autospec=True) + @mock.patch.object(disk_utils, 'list_partitions', + autospec=True) + @mock.patch.object(disk_utils, '_get_labelled_partition', + autospec=True) + 
@mock.patch.object(disk_utils, '_get_configdrive', + autospec=True) + def _test_create_partition_mbr(self, mock_get_configdrive, + mock_get_labelled_partition, + mock_list_partitions, + mock_is_disk_gpt, mock_fix_gpt, + mock_disk_exceeds, mock_dd, + mock_unlink, mock_log, mock_execute, + disk_size_exceeds_max=False, + is_iscsi_device=False): + config_url = 'http://1.2.3.4/cd' + configdrive_file = '/tmp/xyz' + configdrive_mb = 10 + mock_disk_exceeds.return_value = disk_size_exceeds_max + + initial_partitions = [{'end': 49152, 'number': 1, 'start': 1, + 'flags': 'boot', 'filesystem': 'ext4', + 'size': 49151}, + {'end': 51099, 'number': 3, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}, + {'end': 51099, 'number': 5, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}] + updated_partitions = [{'end': 49152, 'number': 1, 'start': 1, + 'flags': 'boot', 'filesystem': 'ext4', + 'size': 49151}, + {'end': 51099, 'number': 3, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}, + {'end': 51099, 'number': 4, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}, + {'end': 51099, 'number': 5, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}] + mock_list_partitions.side_effect = [initial_partitions, + initial_partitions, + updated_partitions] + mock_get_configdrive.return_value = (configdrive_mb, configdrive_file) + mock_get_labelled_partition.return_value = None + mock_is_disk_gpt.return_value = False + + self.node_uuid = "12345678-1234-1234-1234-1234567890abcxyz" + if is_iscsi_device: + self.dev = ('/dev/iqn.2008-10.org.openstack:%s.fake' % + self.node_uuid) + expected_part = '%s-part4' % self.dev + else: + expected_part = '/dev/fake4' + + disk_utils.create_config_drive_partition(self.node_uuid, self.dev, + config_url) + mock_get_configdrive.assert_called_with(config_url, self.node_uuid) + if disk_size_exceeds_max: + self.assertEqual(1, mock_log.call_count) + mock_execute.assert_called_with('parted', '-a', 'optimal', 
'-s', + '--', self.dev, 'mkpart', + 'primary', 'ext2', 2097087, + 2097151, run_as_root=True) + else: + self.assertEqual(0, mock_log.call_count) + mock_execute.assert_called_with('parted', '-a', 'optimal', '-s', + '--', self.dev, 'mkpart', + 'primary', 'ext2', '-64MiB', + '-0', run_as_root=True) + self.assertEqual(3, mock_list_partitions.call_count) + mock_is_disk_gpt.assert_called_with(self.dev, self.node_uuid) + mock_disk_exceeds.assert_called_with(self.dev, self.node_uuid) + mock_dd.assert_called_with(configdrive_file, expected_part) + mock_unlink.assert_called_with(configdrive_file) + self.assertFalse(mock_fix_gpt.called) + + def test__create_partition_mbr_disk_under_2TB(self): + self._test_create_partition_mbr(disk_size_exceeds_max=False, + is_iscsi_device=True) + + def test__create_partition_mbr_disk_exceeds_2TB(self): + self._test_create_partition_mbr(disk_size_exceeds_max=True, + is_iscsi_device=False) + + @mock.patch.object(utils, 'execute', autospec=True) + @mock.patch.object(utils, 'unlink_without_raise', + autospec=True) + @mock.patch.object(disk_utils, 'dd', + autospec=True) + @mock.patch.object(disk_utils, '_is_disk_larger_than_max_size', + autospec=True) + @mock.patch.object(disk_utils, '_fix_gpt_structs', + autospec=True) + @mock.patch.object(disk_utils, '_is_disk_gpt_partitioned', + autospec=True) + @mock.patch.object(disk_utils, 'list_partitions', + autospec=True) + @mock.patch.object(disk_utils, '_get_labelled_partition', + autospec=True) + @mock.patch.object(disk_utils, '_get_configdrive', + autospec=True) + def test_create_partition_part_create_fail(self, mock_get_configdrive, + mock_get_labelled_partition, + mock_list_partitions, + mock_is_disk_gpt, mock_fix_gpt, + mock_disk_exceeds, mock_dd, + mock_unlink, mock_execute): + config_url = 'http://1.2.3.4/cd' + configdrive_file = '/tmp/xyz' + configdrive_mb = 10 + + initial_partitions = [{'end': 49152, 'number': 1, 'start': 1, + 'flags': 'boot', 'filesystem': 'ext4', + 'size': 49151}, + {'end': 
51099, 'number': 3, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}, + {'end': 51099, 'number': 5, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}] + updated_partitions = [{'end': 49152, 'number': 1, 'start': 1, + 'flags': 'boot', 'filesystem': 'ext4', + 'size': 49151}, + {'end': 51099, 'number': 3, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}, + {'end': 51099, 'number': 5, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}] + mock_get_configdrive.return_value = (configdrive_mb, configdrive_file) + mock_get_labelled_partition.return_value = None + mock_is_disk_gpt.return_value = False + mock_disk_exceeds.return_value = False + mock_list_partitions.side_effect = [initial_partitions, + initial_partitions, + updated_partitions] + + self.assertRaisesRegex(exception.InstanceDeployFailure, + 'Disk partitioning failed on device', + disk_utils.create_config_drive_partition, + self.node_uuid, self.dev, config_url) + + mock_get_configdrive.assert_called_with(config_url, self.node_uuid) + mock_execute.assert_called_with('parted', '-a', 'optimal', '-s', '--', + self.dev, 'mkpart', 'primary', + 'ext2', '-64MiB', '-0', + run_as_root=True) + self.assertEqual(3, mock_list_partitions.call_count) + mock_is_disk_gpt.assert_called_with(self.dev, self.node_uuid) + mock_disk_exceeds.assert_called_with(self.dev, self.node_uuid) + self.assertFalse(mock_fix_gpt.called) + self.assertFalse(mock_dd.called) + mock_unlink.assert_called_with(configdrive_file) + + @mock.patch.object(utils, 'execute', autospec=True) + @mock.patch.object(utils, 'unlink_without_raise', + autospec=True) + @mock.patch.object(disk_utils, 'dd', + autospec=True) + @mock.patch.object(disk_utils, '_is_disk_larger_than_max_size', + autospec=True) + @mock.patch.object(disk_utils, '_fix_gpt_structs', + autospec=True) + @mock.patch.object(disk_utils, '_is_disk_gpt_partitioned', + autospec=True) + @mock.patch.object(disk_utils, 'list_partitions', + autospec=True) + 
@mock.patch.object(disk_utils, '_get_labelled_partition', + autospec=True) + @mock.patch.object(disk_utils, '_get_configdrive', + autospec=True) + def test_create_partition_part_create_exc(self, mock_get_configdrive, + mock_get_labelled_partition, + mock_list_partitions, + mock_is_disk_gpt, mock_fix_gpt, + mock_disk_exceeds, mock_dd, + mock_unlink, mock_execute): + config_url = 'http://1.2.3.4/cd' + configdrive_file = '/tmp/xyz' + configdrive_mb = 10 + + initial_partitions = [{'end': 49152, 'number': 1, 'start': 1, + 'flags': 'boot', 'filesystem': 'ext4', + 'size': 49151}, + {'end': 51099, 'number': 3, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}, + {'end': 51099, 'number': 5, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}] + mock_get_configdrive.return_value = (configdrive_mb, configdrive_file) + mock_get_labelled_partition.return_value = None + mock_is_disk_gpt.return_value = False + mock_disk_exceeds.return_value = False + mock_list_partitions.side_effect = [initial_partitions, + initial_partitions] + + mock_execute.side_effect = processutils.ProcessExecutionError + + self.assertRaisesRegex(exception.InstanceDeployFailure, + 'Failed to create config drive on disk', + disk_utils.create_config_drive_partition, + self.node_uuid, self.dev, config_url) + + mock_get_configdrive.assert_called_with(config_url, self.node_uuid) + mock_execute.assert_called_with('parted', '-a', 'optimal', '-s', '--', + self.dev, 'mkpart', 'primary', + 'ext2', '-64MiB', '-0', + run_as_root=True) + self.assertEqual(2, mock_list_partitions.call_count) + mock_is_disk_gpt.assert_called_with(self.dev, self.node_uuid) + mock_disk_exceeds.assert_called_with(self.dev, self.node_uuid) + self.assertFalse(mock_fix_gpt.called) + self.assertFalse(mock_dd.called) + mock_unlink.assert_called_with(configdrive_file) + + @mock.patch.object(utils, 'unlink_without_raise', + autospec=True) + @mock.patch.object(disk_utils, 'dd', + autospec=True) + @mock.patch.object(disk_utils, 
'_fix_gpt_structs', + autospec=True) + @mock.patch.object(disk_utils, '_is_disk_gpt_partitioned', + autospec=True) + @mock.patch.object(disk_utils, 'list_partitions', + autospec=True) + @mock.patch.object(disk_utils, '_get_labelled_partition', + autospec=True) + @mock.patch.object(disk_utils, '_get_configdrive', + autospec=True) + def test_create_partition_num_parts_exceed(self, mock_get_configdrive, + mock_get_labelled_partition, + mock_list_partitions, + mock_is_disk_gpt, mock_fix_gpt, + mock_dd, mock_unlink): + config_url = 'http://1.2.3.4/cd' + configdrive_file = '/tmp/xyz' + configdrive_mb = 10 + + partitions = [{'end': 49152, 'number': 1, 'start': 1, + 'flags': 'boot', 'filesystem': 'ext4', + 'size': 49151}, + {'end': 51099, 'number': 2, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}, + {'end': 51099, 'number': 3, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}, + {'end': 51099, 'number': 4, 'start': 49153, + 'flags': '', 'filesystem': '', 'size': 2046}] + mock_get_configdrive.return_value = (configdrive_mb, configdrive_file) + mock_get_labelled_partition.return_value = None + mock_is_disk_gpt.return_value = False + mock_list_partitions.side_effect = [partitions, partitions] + + self.assertRaisesRegex(exception.InstanceDeployFailure, + 'Config drive cannot be created for node', + disk_utils.create_config_drive_partition, + self.node_uuid, self.dev, config_url) + + mock_get_configdrive.assert_called_with(config_url, self.node_uuid) + self.assertEqual(2, mock_list_partitions.call_count) + mock_is_disk_gpt.assert_called_with(self.dev, self.node_uuid) + self.assertFalse(mock_fix_gpt.called) + self.assertFalse(mock_dd.called) + mock_unlink.assert_called_with(configdrive_file) + + @mock.patch.object(utils, 'execute', autospec=True) + @mock.patch.object(utils, 'unlink_without_raise', + autospec=True) + @mock.patch.object(disk_utils, '_get_labelled_partition', + autospec=True) + @mock.patch.object(disk_utils, '_get_configdrive', + 
autospec=True) + def test_create_partition_conf_drive_sz_exceed(self, mock_get_configdrive, + mock_get_labelled_partition, + mock_unlink, mock_execute): + config_url = 'http://1.2.3.4/cd' + configdrive_file = '/tmp/xyz' + configdrive_mb = 65 + + mock_get_configdrive.return_value = (configdrive_mb, configdrive_file) + mock_get_labelled_partition.return_value = None + + self.assertRaisesRegex(exception.InstanceDeployFailure, + 'Config drive size exceeds maximum limit', + disk_utils.create_config_drive_partition, + self.node_uuid, self.dev, config_url) + + mock_get_configdrive.assert_called_with(config_url, self.node_uuid) + mock_unlink.assert_called_with(configdrive_file) diff --git a/ironic_lib/tests/test_utils.py b/ironic_lib/tests/test_utils.py index f07e508..b8d1aa2 100644 --- a/ironic_lib/tests/test_utils.py +++ b/ironic_lib/tests/test_utils.py @@ -279,3 +279,52 @@ class IsHttpUrlTestCase(test_base.BaseTestCase): self.assertTrue(utils.is_http_url('HTTPS://127.3.2.1')) self.assertFalse(utils.is_http_url('Zm9vYmFy')) self.assertFalse(utils.is_http_url('11111111')) + + +class ParseRootDeviceTestCase(test_base.BaseTestCase): + + def setUp(self): + super(ParseRootDeviceTestCase, self).setUp() + self.root_device = { + 'wwn': '123456', 'model': 'foo-model', 'size': 12345, + 'serial': 'foo-serial', 'vendor': 'foo-vendor', 'name': '/dev/sda', + 'wwn_with_extension': '123456111', 'wwn_vendor_extension': '111', + 'rotational': True} + + def test_parse_root_device_hints(self): + result = utils.parse_root_device_hints(self.root_device) + self.assertEqual(self.root_device, result) + + def test_parse_root_device_hints_no_hints(self): + result = utils.parse_root_device_hints({}) + self.assertIsNone(result) + + def test_parse_root_device_hints_convert_size(self): + result = utils.parse_root_device_hints({'size': '12345'}) + self.assertEqual({'size': 12345}, result) + + def test_parse_root_device_hints_invalid_size(self): + for value in ('not-int', -123, 0): + 
self.assertRaises(ValueError, utils.parse_root_device_hints, + {'size': value}) + + def _parse_root_device_hints_convert_rotational(self, values, + expected_value): + for value in values: + result = utils.parse_root_device_hints({'rotational': value}) + self.assertEqual({'rotational': expected_value}, result) + + def test_parse_root_device_hints_convert_rotational(self): + self._parse_root_device_hints_convert_rotational( + (True, 'true', 'on', 'y', 'yes'), True) + + self._parse_root_device_hints_convert_rotational( + (False, 'false', 'off', 'n', 'no'), False) + + def test_parse_root_device_hints_invalid_rotational(self): + self.assertRaises(ValueError, utils.parse_root_device_hints, + {'rotational': 'not-bool'}) + + def test_parse_root_device_hints_non_existent_hint(self): + self.assertRaises(ValueError, utils.parse_root_device_hints, + {'non-existent': 'foo'}) diff --git a/ironic_lib/utils.py b/ironic_lib/utils.py index d731d22..9f32631 100644 --- a/ironic_lib/utils.py +++ b/ironic_lib/utils.py @@ -18,6 +18,7 @@ """Utilities and helper functions.""" +import copy import errno import logging import os @@ -25,7 +26,9 @@ import os from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import excutils +from oslo_utils import strutils +from ironic_lib.common.i18n import _ from ironic_lib.common.i18n import _LE from ironic_lib.common.i18n import _LW from ironic_lib import exception @@ -43,6 +46,11 @@ CONF.register_opts(utils_opts, group='ironic_lib') LOG = logging.getLogger(__name__) +VALID_ROOT_DEVICE_HINTS = set(('size', 'model', 'wwn', 'serial', 'vendor', + 'wwn_with_extension', 'wwn_vendor_extension', + 'name', 'rotational')) + + def execute(*cmd, **kwargs): """Convenience wrapper around oslo's execute() method. 
@@ -159,3 +167,57 @@ def is_http_url(url): def list_opts(): """Entry point for oslo-config-generator.""" return [('ironic_lib', utils_opts)] + + +def parse_root_device_hints(root_device): + """Parse the root_device property of a node. + + Parses and validates the root_device property of a node. These are + hints for how a node's root device is created. The 'size' hint + should be a positive integer. The 'rotational' hint should be a + Boolean value. + + :param root_device: the root_device dictionary from the node's property. + :returns: a dictionary with the root device hints parsed or + None if there are no hints. + :raises: ValueError, if some information is invalid. + + """ + if not root_device: + return + + root_device = copy.deepcopy(root_device) + + invalid_hints = set(root_device) - VALID_ROOT_DEVICE_HINTS + if invalid_hints: + raise ValueError( + _('The hints "%(invalid_hints)s" are invalid. ' + 'Valid hints are: "%(valid_hints)s"') % + {'invalid_hints': ', '.join(invalid_hints), + 'valid_hints': ', '.join(VALID_ROOT_DEVICE_HINTS)}) + + if 'size' in root_device: + try: + size = int(root_device['size']) + except ValueError: + raise ValueError( + _('Root device hint "size" is not an integer value. ' + 'Current value: %s') % root_device['size']) + + if size <= 0: + raise ValueError( + _('Root device hint "size" should be a positive integer. ' + 'Current value: %d') % size) + + root_device['size'] = size + + if 'rotational' in root_device: + try: + root_device['rotational'] = strutils.bool_from_string( + root_device['rotational'], strict=True) + except ValueError: + raise ValueError( + _('Root device hint "rotational" is not a Boolean value. ' + 'Current value: %s') % root_device['rotational']) + + return root_device diff --git a/ironic_lib/version.py b/ironic_lib/version.py new file mode 100644 index 0000000..cd45253 --- /dev/null +++ b/ironic_lib/version.py @@ -0,0 +1,18 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import pbr.version + +version_info = pbr.version.VersionInfo('ironic_lib') diff --git a/requirements.txt b/requirements.txt index 1159ddb..9b1b50b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,10 +4,10 @@ pbr>=1.6 # Apache-2.0 oslo.concurrency>=3.8.0 # Apache-2.0 -oslo.config>=3.10.0 # Apache-2.0 +oslo.config>=3.14.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.service>=1.10.0 # Apache-2.0 -oslo.utils>=3.14.0 # Apache-2.0 +oslo.utils>=3.16.0 # Apache-2.0 requests>=2.10.0 # Apache-2.0 six>=1.9.0 # MIT oslo.log>=1.14.0 # Apache-2.0 diff --git a/setup.cfg b/setup.cfg index b48475e..3f102a8 100644 --- a/setup.cfg +++ b/setup.cfg @@ -27,3 +27,12 @@ oslo.config.opts = ironic_lib.utils = ironic_lib.utils:list_opts ironic_lib.metrics = ironic_lib.metrics_utils:list_opts ironic_lib.metrics_statsd = ironic_lib.metrics_statsd:list_opts + +[pbr] +autodoc_index_modules = True +warnerrors = True + +[build_sphinx] +all_files = 1 +build-dir = doc/build +source-dir = doc/source diff --git a/test-requirements.txt b/test-requirements.txt index 6a7c98a..b6f0e36 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -10,3 +10,8 @@ os-testr>=0.7.0 # Apache-2.0 oslotest>=1.10.0 # Apache-2.0 testscenarios>=0.4 # Apache-2.0/BSD testtools>=1.4.0 # MIT + +# Doc requirements +doc8 # Apache-2.0 +sphinx!=1.3b1,<1.3,>=1.2.1 # BSD +oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 diff --git a/tools/tox_install.sh 
b/tools/tox_install.sh new file mode 100755 index 0000000..47136ea --- /dev/null +++ b/tools/tox_install.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash + +# Client constraint file contains this client version pin that is in conflict +# with installing the client from source. We should replace the version pin in +# the constraints file before applying it for from-source installation. + +ZUUL_CLONER=/usr/zuul-env/bin/zuul-cloner +BRANCH_NAME=master +CLIENT_NAME=ironic-lib +requirements_installed=$(echo "import openstack_requirements" | python 2>/dev/null ; echo $?) + +set -e + +CONSTRAINTS_FILE=$1 +shift + +install_cmd="pip install" +if [ $CONSTRAINTS_FILE != "unconstrained" ]; then + + mydir=$(mktemp -dt "$CLIENT_NAME-tox_install-XXXXXXX") + localfile=$mydir/upper-constraints.txt + if [[ $CONSTRAINTS_FILE != http* ]]; then + CONSTRAINTS_FILE=file://$CONSTRAINTS_FILE + fi + curl $CONSTRAINTS_FILE -k -o $localfile + install_cmd="$install_cmd -c$localfile" + + if [ $requirements_installed -eq 0 ]; then + echo "ALREADY INSTALLED" > /tmp/tox_install.txt + echo "Requirements already installed; using existing package" + elif [ -x "$ZUUL_CLONER" ]; then + export ZUUL_BRANCH=${ZUUL_BRANCH-$BRANCH} + echo "ZUUL CLONER" > /tmp/tox_install.txt + pushd $mydir + $ZUUL_CLONER --cache-dir \ + /opt/git \ + --branch $BRANCH_NAME \ + git://git.openstack.org \ + openstack/requirements + cd openstack/requirements + $install_cmd -e . + popd + else + echo "PIP HARDCODE" > /tmp/tox_install.txt + if [ -z "$REQUIREMENTS_PIP_LOCATION" ]; then + REQUIREMENTS_PIP_LOCATION="git+https://git.openstack.org/openstack/requirements@$BRANCH_NAME#egg=requirements" + fi + $install_cmd -U -e ${REQUIREMENTS_PIP_LOCATION} + fi + + edit-constraints $localfile -- $CLIENT_NAME "-e file://$PWD#egg=$CLIENT_NAME" +fi + +$install_cmd -U $* +exit $? 
diff --git a/tox.ini b/tox.ini index 7e912ae..8aaa327 100644 --- a/tox.ini +++ b/tox.ini @@ -4,6 +4,8 @@ skipsdist = True envlist = py34,py27,pep8 [testenv] +install_command = + {toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} usedevelop = True setenv = VIRTUAL_ENV={envdir} PYTHONDONTWRITEBYTECODE = 1 @@ -18,7 +20,9 @@ ignore = E129 exclude = .venv,.tox,dist,doc,*.egg,.update-venv [testenv:pep8] -commands = flake8 {posargs} +commands = + flake8 {posargs} + doc8 README.rst doc/source --ignore D001 [testenv:cover] setenv = VIRTUALENV={envdir} @@ -28,3 +32,10 @@ commands = [testenv:venv] commands = {posargs} + +[testenv:docs] +setenv = PYTHONHASHSEED=0 +sitepackages = False +envdir = {toxworkdir}/venv +commands = + python setup.py build_sphinx