Support for stable/newton

Add patches for stable/newton branches of ironic and nova.

Change-Id: I182812051fd2a21b8a4e8d51c2096537df159509
Andrii Ostapenko 2017-01-12 10:09:31 +02:00
parent 76c9de218a
commit 500db81c99
2 changed files with 1357 additions and 0 deletions


@@ -0,0 +1,403 @@
From de0d00b6e9ebaced3ad49d050af87cc3f7c6382b Mon Sep 17 00:00:00 2001
From: Dmitry Bogun <dbogun@mirantis.com>
Date: Mon, 28 Nov 2016 17:06:41 +0200
Subject: [PATCH] "newton" bareon patch
---
ironic/api/controllers/v1/node.py | 5 +++
ironic/common/context.py | 1 -
ironic/common/images.py | 13 ++++---
ironic/common/states.py | 6 +++
ironic/common/swift.py | 21 +++++++++++
ironic/conductor/manager.py | 5 +++
ironic/conductor/task_manager.py | 2 +-
ironic/drivers/base.py | 7 ++++
ironic/drivers/modules/image_cache.py | 43 ++++++++++++++--------
ironic/tests/unit/common/test_rpc.py | 6 +--
ironic/tests/unit/common/test_swift.py | 1 +
.../tests/unit/drivers/modules/test_image_cache.py | 17 +++++----
12 files changed, 91 insertions(+), 36 deletions(-)
diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py
index 5cb2ccc..a28e6ea 100644
--- a/ironic/api/controllers/v1/node.py
+++ b/ironic/api/controllers/v1/node.py
@@ -35,6 +35,7 @@ from ironic.api.controllers.v1 import utils as api_utils
from ironic.api.controllers.v1 import versions
from ironic.api import expose
from ironic.common import exception
+from ironic.common import driver_factory
from ironic.common.i18n import _
from ironic.common import policy
from ironic.common import states as ir_states
@@ -538,6 +539,10 @@ class NodeStatesController(rest.RestController):
m = ir_states.machine.copy()
m.initialize(rpc_node.provision_state)
+
+ ir_states.apply_driver_patches(
+ m, driver_factory.get_driver(rpc_node.driver))
+
if not m.is_actionable_event(ir_states.VERBS.get(target, target)):
# Normally, we let the task manager recognize and deal with
# NodeLocked exceptions. However, that isn't done until the RPC
diff --git a/ironic/common/context.py b/ironic/common/context.py
index 6e9c7e3..f708e4d 100644
--- a/ironic/common/context.py
+++ b/ironic/common/context.py
@@ -71,7 +71,6 @@ class RequestContext(context.RequestContext):
@classmethod
def from_dict(cls, values):
values.pop('user', None)
- values.pop('tenant', None)
return cls(**values)
def ensure_thread_contain_context(self):
diff --git a/ironic/common/images.py b/ironic/common/images.py
index 908671c..ddc87e1 100644
--- a/ironic/common/images.py
+++ b/ironic/common/images.py
@@ -292,16 +292,17 @@ def create_isolinux_image_for_uefi(output_file, deploy_iso, kernel, ramdisk,
raise exception.ImageCreationFailed(image_type='iso', error=e)
-def fetch(context, image_href, path, force_raw=False):
+def fetch(context, image_href, path, force_raw=False, image_service=None):
# TODO(vish): Improve context handling and add owner and auth data
# when it is added to glance. Right now there is no
# auth checking in glance, so we assume that access was
# checked before we got here.
- image_service = service.get_image_service(image_href,
- context=context)
- LOG.debug("Using %(image_service)s to download image %(image_href)s." %
- {'image_service': image_service.__class__,
- 'image_href': image_href})
+ if not image_service:
+ image_service = service.get_image_service(image_href,
+ context=context)
+ LOG.debug("Using %(image_service)s to download image %(image_href)s." %
+ {'image_service': image_service.__class__,
+ 'image_href': image_href})
with fileutils.remove_path_on_error(path):
with open(path, "wb") as image_file:
diff --git a/ironic/common/states.py b/ironic/common/states.py
index f14ce5e..9de5c2b 100644
--- a/ironic/common/states.py
+++ b/ironic/common/states.py
@@ -382,3 +382,9 @@ machine.add_transition(ADOPTFAIL, ADOPTING, 'adopt')
# A node that failed adoption can be moved back to manageable
machine.add_transition(ADOPTFAIL, MANAGEABLE, 'manage')
+
+
+def apply_driver_patches(machine, driver):
+ # Check whether the deployment can be terminated
+ if driver.deploy.can_terminate_deployment:
+ machine.add_transition(DEPLOYING, DELETING, 'delete')
diff --git a/ironic/common/swift.py b/ironic/common/swift.py
index c16cb3c..e8452e0 100644
--- a/ironic/common/swift.py
+++ b/ironic/common/swift.py
@@ -173,3 +173,24 @@ class SwiftAPI(object):
except swift_exceptions.ClientException as e:
operation = _("post object")
raise exception.SwiftOperationError(operation=operation, error=e)
+
+ # FIXME(dbogun): make tenant bound connection
+ def get_object(self, container, object, object_headers=None,
+ chunk_size=None):
+ """Get Swift object.
+
+ :param container: The name of the container in which Swift object
+ is placed.
+ :param object: The name of the object in Swift
+ :param object_headers: the headers for the object to pass to Swift
+ :param chunk_size: size of the chunk used to read from the response
+ :returns: Tuple (headers, body)
+ :raises: SwiftOperationError, if operation with Swift fails.
+ """
+ try:
+ return self.connection.get_object(container, object,
+ headers=object_headers,
+ resp_chunk_size=chunk_size)
+ except swift_exceptions.ClientException as e:
+ operation = _("get object")
+ raise exception.SwiftOperationError(operation=operation, error=e)
diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py
index a83c71b..d63cd06 100644
--- a/ironic/conductor/manager.py
+++ b/ironic/conductor/manager.py
@@ -515,6 +515,11 @@ class ConductorManager(base_manager.BaseConductorManager):
"""
LOG.debug("RPC do_node_tear_down called for node %s." % node_id)
+ with task_manager.acquire(context, node_id, shared=True) as task:
+ if (task.node.provision_state == states.DEPLOYING and
+ task.driver.deploy.can_terminate_deployment):
+ task.driver.deploy.terminate_deployment(task)
+
with task_manager.acquire(context, node_id, shared=False,
purpose='node tear down') as task:
try:
diff --git a/ironic/conductor/task_manager.py b/ironic/conductor/task_manager.py
index a709e48..8993cf7 100644
--- a/ironic/conductor/task_manager.py
+++ b/ironic/conductor/task_manager.py
@@ -217,7 +217,7 @@ class TaskManager(object):
self.node.id)
self.driver = driver_factory.build_driver_for_task(
self, driver_name=driver_name)
-
+ states.apply_driver_patches(self.fsm, self.driver)
except Exception:
with excutils.save_and_reraise_exception():
self.release_resources()
diff --git a/ironic/drivers/base.py b/ironic/drivers/base.py
index af4aab9..e7b966a 100644
--- a/ironic/drivers/base.py
+++ b/ironic/drivers/base.py
@@ -401,6 +401,13 @@ class DeployInterface(BaseInterface):
'the driver %(driver)s does not support heartbeating'),
{'node': task.node.uuid, 'driver': task.node.driver})
+ def terminate_deployment(self, *args, **kwargs):
+ pass
+
+ @property
+ def can_terminate_deployment(self):
+ return False
+
@six.add_metaclass(abc.ABCMeta)
class BootInterface(object):
diff --git a/ironic/drivers/modules/image_cache.py b/ironic/drivers/modules/image_cache.py
index 541cce4..42e0026 100644
--- a/ironic/drivers/modules/image_cache.py
+++ b/ironic/drivers/modules/image_cache.py
@@ -26,6 +26,7 @@ import uuid
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_utils import fileutils
+from oslo_utils import uuidutils
import six
from ironic.common import exception
@@ -48,7 +49,8 @@ _cache_cleanup_list = []
class ImageCache(object):
"""Class handling access to cache for master images."""
- def __init__(self, master_dir, cache_size, cache_ttl):
+ def __init__(self, master_dir, cache_size, cache_ttl,
+ image_service=None):
"""Constructor.
:param master_dir: cache directory to work on
@@ -59,6 +61,7 @@ class ImageCache(object):
self.master_dir = master_dir
self._cache_size = cache_size
self._cache_ttl = cache_ttl
+ self._image_service = image_service
if master_dir is not None:
fileutils.ensure_tree(master_dir)
@@ -83,23 +86,28 @@ class ImageCache(object):
# NOTE(ghe): We don't share images between instances/hosts
if not CONF.parallel_image_downloads:
with lockutils.lock(img_download_lock_name, 'ironic-'):
- _fetch(ctx, href, dest_path, force_raw)
+ _fetch(ctx, href, dest_path,
+ image_service=self._image_service,
+ force_raw=force_raw)
else:
- _fetch(ctx, href, dest_path, force_raw)
+ _fetch(ctx, href, dest_path, image_service=self._image_service,
+ force_raw=force_raw)
return
# TODO(ghe): have hard links and counts the same behaviour in all fs
- # NOTE(vdrok): File name is converted to UUID if it's not UUID already,
- # so that two images with same file names do not collide
- if service_utils.is_glance_image(href):
- master_file_name = service_utils.parse_image_ref(href)[0]
+ if uuidutils.is_uuid_like(href):
+ master_file_name = href
+
+ elif (self._image_service and
+ hasattr(self._image_service, 'get_image_unique_id')):
+ master_file_name = self._image_service.get_image_unique_id(href)
+
else:
- # NOTE(vdrok): Doing conversion of href in case it's unicode
- # string, UUID cannot be generated for unicode strings on python 2.
href_encoded = href.encode('utf-8') if six.PY2 else href
master_file_name = str(uuid.uuid5(uuid.NAMESPACE_URL,
href_encoded))
+
master_path = os.path.join(self.master_dir, master_file_name)
if CONF.parallel_image_downloads:
@@ -110,8 +118,8 @@ class ImageCache(object):
# NOTE(vdrok): After rebuild requested image can change, so we
# should ensure that dest_path and master_path (if exists) are
# pointing to the same file and their content is up to date
- cache_up_to_date = _delete_master_path_if_stale(master_path, href,
- ctx)
+ cache_up_to_date = _delete_master_path_if_stale(
+ master_path, href, ctx, img_service=self._image_service)
dest_up_to_date = _delete_dest_path_if_stale(master_path,
dest_path)
@@ -157,7 +165,8 @@ class ImageCache(object):
tmp_path = os.path.join(tmp_dir, href.split('/')[-1])
try:
- _fetch(ctx, href, tmp_path, force_raw)
+ _fetch(ctx, href, tmp_path, force_raw,
+ image_service=self._image_service)
# NOTE(dtantsur): no need for global lock here - master_path
# will have link count >1 at any moment, so won't be cleaned up
os.link(tmp_path, master_path)
@@ -298,10 +307,11 @@ def _free_disk_space_for(path):
return stat.f_frsize * stat.f_bavail
-def _fetch(context, image_href, path, force_raw=False):
+def _fetch(context, image_href, path, force_raw=False, image_service=None):
"""Fetch image and convert to raw format if needed."""
path_tmp = "%s.part" % path
- images.fetch(context, image_href, path_tmp, force_raw=False)
+ images.fetch(context, image_href, path_tmp, force_raw=False,
+ image_service=image_service)
# Notes(yjiang5): If glance can provide the virtual size information,
# then we can firstly clean cache and then invoke images.fetch().
if force_raw:
@@ -374,7 +384,7 @@ def cleanup(priority):
return _add_property_to_class_func
-def _delete_master_path_if_stale(master_path, href, ctx):
+def _delete_master_path_if_stale(master_path, href, ctx, img_service=None):
"""Delete image from cache if it is not up to date with href contents.
:param master_path: path to an image in master cache
@@ -387,7 +397,8 @@ def _delete_master_path_if_stale(master_path, href, ctx):
# Glance image contents cannot be updated without changing image's UUID
return os.path.exists(master_path)
if os.path.exists(master_path):
- img_service = image_service.get_image_service(href, context=ctx)
+ if not img_service:
+ img_service = image_service.get_image_service(href, context=ctx)
img_mtime = img_service.show(href).get('updated_at')
if not img_mtime:
# This means that href is not a glance image and doesn't have an
diff --git a/ironic/tests/unit/common/test_rpc.py b/ironic/tests/unit/common/test_rpc.py
index 197f553..71fe5f8 100644
--- a/ironic/tests/unit/common/test_rpc.py
+++ b/ironic/tests/unit/common/test_rpc.py
@@ -180,10 +180,8 @@ class TestRequestContextSerializer(base.TestCase):
def test_deserialize_context(self):
self.context.user = 'fake-user'
- self.context.tenant = 'fake-tenant'
serialize_values = self.context.to_dict()
new_context = self.serializer.deserialize_context(serialize_values)
- # Ironic RequestContext from_dict will pop 'user' and 'tenant' and
- # initialize to None.
+ # Ironic RequestContext from_dict will pop 'user' and initialize
+ # to None.
self.assertIsNone(new_context.user)
- self.assertIsNone(new_context.tenant)
diff --git a/ironic/tests/unit/common/test_swift.py b/ironic/tests/unit/common/test_swift.py
index e5bc306..e04d8e2 100644
--- a/ironic/tests/unit/common/test_swift.py
+++ b/ironic/tests/unit/common/test_swift.py
@@ -108,6 +108,7 @@ class SwiftTestCase(base.TestCase):
connection_obj_mock.url = 'http://host/v1/AUTH_tenant_id'
head_ret_val = {'x-account-meta-temp-url-key': 'secretkey'}
connection_obj_mock.head_account.return_value = head_ret_val
+ connection_obj_mock.os_options = {}
gen_temp_url_mock.return_value = 'temp-url-path'
temp_url_returned = swiftapi.get_temp_url('container', 'object', 10)
connection_obj_mock.head_account.assert_called_once_with()
diff --git a/ironic/tests/unit/drivers/modules/test_image_cache.py b/ironic/tests/unit/drivers/modules/test_image_cache.py
index 1224f52..ed9d83c 100644
--- a/ironic/tests/unit/drivers/modules/test_image_cache.py
+++ b/ironic/tests/unit/drivers/modules/test_image_cache.py
@@ -59,7 +59,7 @@ class TestImageCacheFetch(base.TestCase):
self.cache.fetch_image(self.uuid, self.dest_path)
self.assertFalse(mock_download.called)
mock_fetch.assert_called_once_with(
- None, self.uuid, self.dest_path, True)
+ None, self.uuid, self.dest_path, True, image_service=None)
self.assertFalse(mock_clean_up.called)
@mock.patch.object(image_cache.ImageCache, 'clean_up', autospec=True)
@@ -75,7 +75,7 @@ class TestImageCacheFetch(base.TestCase):
mock_clean_up):
self.cache.fetch_image(self.uuid, self.dest_path)
mock_cache_upd.assert_called_once_with(self.master_path, self.uuid,
- None)
+ None, img_service=None)
mock_dest_upd.assert_called_once_with(self.master_path, self.dest_path)
self.assertFalse(mock_link.called)
self.assertFalse(mock_download.called)
@@ -94,7 +94,7 @@ class TestImageCacheFetch(base.TestCase):
mock_clean_up):
self.cache.fetch_image(self.uuid, self.dest_path)
mock_cache_upd.assert_called_once_with(self.master_path, self.uuid,
- None)
+ None, img_service=None)
mock_dest_upd.assert_called_once_with(self.master_path, self.dest_path)
mock_link.assert_called_once_with(self.master_path, self.dest_path)
self.assertFalse(mock_download.called)
@@ -113,7 +113,7 @@ class TestImageCacheFetch(base.TestCase):
mock_clean_up):
self.cache.fetch_image(self.uuid, self.dest_path)
mock_cache_upd.assert_called_once_with(self.master_path, self.uuid,
- None)
+ None, img_service=None)
mock_dest_upd.assert_called_once_with(self.master_path, self.dest_path)
self.assertFalse(mock_link.called)
mock_download.assert_called_once_with(
@@ -134,7 +134,7 @@ class TestImageCacheFetch(base.TestCase):
mock_clean_up):
self.cache.fetch_image(self.uuid, self.dest_path)
mock_cache_upd.assert_called_once_with(self.master_path, self.uuid,
- None)
+ None, img_service=None)
mock_dest_upd.assert_called_once_with(self.master_path, self.dest_path)
self.assertFalse(mock_link.called)
mock_download.assert_called_once_with(
@@ -158,7 +158,7 @@ class TestImageCacheFetch(base.TestCase):
@mock.patch.object(image_cache, '_fetch', autospec=True)
def test__download_image(self, mock_fetch):
- def _fake_fetch(ctx, uuid, tmp_path, *args):
+ def _fake_fetch(ctx, uuid, tmp_path, *args, **kwargs):
self.assertEqual(self.uuid, uuid)
self.assertNotEqual(self.dest_path, tmp_path)
self.assertNotEqual(os.path.dirname(tmp_path), self.master_dir)
@@ -446,7 +446,7 @@ class TestImageCacheCleanUp(base.TestCase):
@mock.patch.object(utils, 'rmtree_without_raise', autospec=True)
@mock.patch.object(image_cache, '_fetch', autospec=True)
def test_temp_images_not_cleaned(self, mock_fetch, mock_rmtree):
- def _fake_fetch(ctx, uuid, tmp_path, *args):
+ def _fake_fetch(ctx, uuid, tmp_path, *args, **kwargs):
with open(tmp_path, 'w') as fp:
fp.write("TEST" * 10)
@@ -691,7 +691,8 @@ class TestFetchCleanup(base.TestCase):
mock_size.return_value = 100
image_cache._fetch('fake', 'fake-uuid', '/foo/bar', force_raw=True)
mock_fetch.assert_called_once_with('fake', 'fake-uuid',
- '/foo/bar.part', force_raw=False)
+ '/foo/bar.part', image_service=None,
+ force_raw=False)
mock_clean.assert_called_once_with('/foo', 100)
mock_raw.assert_called_once_with('fake-uuid', '/foo/bar',
'/foo/bar.part')
--
2.7.3
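
The ironic patch above adds driver-conditional state-machine transitions via states.apply_driver_patches() together with a new DeployInterface.can_terminate_deployment flag. The sketch below is illustrative only and is not part of the patch: FakeMachine and FakeDriver are stand-ins for ironic.common.states.machine and a loaded driver, and it simply mirrors the intended effect that the DEPLOYING -> DELETING transition becomes valid only for drivers whose deploy interface advertises termination support.

    # Illustrative stand-ins; not ironic classes.
    class FakeMachine(object):
        def __init__(self):
            self.transitions = set()

        def add_transition(self, start, end, event):
            self.transitions.add((start, end, event))

        def is_actionable_event(self, event):
            return any(t[2] == event for t in self.transitions)

    class FakeDeploy(object):
        can_terminate_deployment = True   # opt-in flag introduced by the patch

        def terminate_deployment(self, task):
            pass                          # driver-specific abort logic goes here

    class FakeDriver(object):
        deploy = FakeDeploy()

    def apply_driver_patches(machine, driver):
        # Mirrors the patched ironic.common.states.apply_driver_patches().
        if driver.deploy.can_terminate_deployment:
            machine.add_transition('deploying', 'deleting', 'delete')

    machine = FakeMachine()
    apply_driver_patches(machine, FakeDriver())
    assert machine.is_actionable_event('delete')

With a driver whose deploy interface keeps the default can_terminate_deployment = False, no transition is added and the 'delete' event stays non-actionable while the node is DEPLOYING, matching the behaviour before the patch.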


@@ -0,0 +1,954 @@
From 63e037b99264bd1b04b43f000608f2fa80919917 Mon Sep 17 00:00:00 2001
From: Andrii Ostapenko <aostapenko@mirantis.com>
Date: Thu, 12 Jan 2017 10:13:16 +0200
Subject: [PATCH] "newton" bareon patch
---
nova/objects/image_meta.py | 1 +
nova/tests/unit/objects/test_objects.py | 2 +-
nova/tests/unit/virt/ironic/test_driver.py | 498 +++++++++++++++++++++++++++--
nova/tests/unit/virt/ironic/utils.py | 14 +-
nova/virt/ironic/driver.py | 165 +++++++++-
5 files changed, 640 insertions(+), 40 deletions(-)
diff --git a/nova/objects/image_meta.py b/nova/objects/image_meta.py
index 2765c53..00c21aa 100644
--- a/nova/objects/image_meta.py
+++ b/nova/objects/image_meta.py
@@ -442,6 +442,7 @@ class ImageMetaProps(base.NovaObject):
# is a fairly generic type. For a detailed type consider os_distro
# instead
'os_type': fields.OSTypeField(),
+ 'deploy_config': fields.StringField(),
}
# The keys are the legacy property names and
diff --git a/nova/tests/unit/objects/test_objects.py b/nova/tests/unit/objects/test_objects.py
index 6bc72d6..c90dca65 100644
--- a/nova/tests/unit/objects/test_objects.py
+++ b/nova/tests/unit/objects/test_objects.py
@@ -1132,7 +1132,7 @@ object_data = {
'HVSpec': '1.2-db672e73304da86139086d003f3977e7',
'IDEDeviceBus': '1.0-29d4c9f27ac44197f01b6ac1b7e16502',
'ImageMeta': '1.8-642d1b2eb3e880a367f37d72dd76162d',
- 'ImageMetaProps': '1.15-d45133ec8d2d4a6456338fb0ffd0e5c2',
+ 'ImageMetaProps': '1.15-f7e534b482cc14d32f671c224144258d',
'Instance': '2.3-4f98ab23f4b0a25fabb1040c8f5edecc',
'InstanceAction': '1.1-f9f293e526b66fca0d05c3b3a2d13914',
'InstanceActionEvent': '1.1-e56a64fa4710e43ef7af2ad9d6028b33',
diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py
index 18cc374..5f4cfa1 100644
--- a/nova/tests/unit/virt/ironic/test_driver.py
+++ b/nova/tests/unit/virt/ironic/test_driver.py
@@ -831,6 +831,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
result = self.driver.macs_for_instance(instance)
self.assertIsNone(result)
+ @mock.patch.object(ironic_driver.IronicDriver, '_get_switch_boot_options')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(FAKE_CLIENT, 'node')
@@ -839,7 +840,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
@mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
def _test_spawn(self, mock_sf, mock_pvifs, mock_adf, mock_wait_active,
- mock_node, mock_looping, mock_save):
+ mock_node, mock_looping, mock_save, mock_sb_options):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
@@ -877,6 +878,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
fake_looping_call.start.assert_called_once_with(
interval=CONF.ironic.api_retry_interval)
fake_looping_call.wait.assert_called_once_with()
+ mock_sb_options.assert_called_once_with(self.ctx, instance, node_uuid)
@mock.patch.object(ironic_driver.IronicDriver, '_generate_configdrive')
@mock.patch.object(configdrive, 'required_by')
@@ -930,14 +932,62 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.driver.spawn, self.ctx, instance, None, [], None)
self.assertEqual(0, mock_destroy.call_count)
- def _test_add_driver_fields(self, mock_update=None, mock_call=None):
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ def test__get_switch_boot_options(self, mock_node):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(
+ driver='fake', uuid=node_uuid,
+ instance_info={
+ 'multiboot': True,
+ 'multiboot_info': {'elements': [
+ {'image_name': "name_1"},
+ {'image_name': "name_2"},
+ ]}}
+ )
+ mock_node.get.return_value = node
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, node=node_uuid, expected_attrs=('metadata',))
+
+ self.driver._get_switch_boot_options(self.ctx,
+ instance, node_uuid)
+
+ exp_meta = {'available_images': str(['name_1', 'name_2'])}
+ self.assertEqual(exp_meta, instance.metadata)
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ def test__get_switch_boot_options_not_multiboot(self, mock_node):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(
+ driver='fake', uuid=node_uuid,
+ instance_info={}
+ )
+ mock_node.get.return_value = node
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, node=node_uuid, expected_attrs=('metadata',))
+
+ self.driver._get_switch_boot_options(self.ctx,
+ instance, node_uuid)
+
+ self.assertEqual({}, instance.metadata)
+
+ @mock.patch.object(ironic_driver.IronicDriver,
+ "_get_deploy_config_options")
+ def _test_add_driver_fields(self, mock_get_depl_conf_opts,
+ mock_update=None, mock_call=None):
node = ironic_utils.get_test_node(driver='fake')
- instance = fake_instance.fake_instance_obj(self.ctx,
- node=node.uuid)
+ instance = fake_instance.fake_instance_obj(
+ self.ctx,
+ node=node.uuid,
+ expected_attrs=('metadata',))
image_meta = ironic_utils.get_test_image_meta()
flavor = ironic_utils.get_test_flavor()
instance.flavor = flavor
+
+ mock_get_depl_conf_opts.return_value = {'foo': 'bar123'}
+ instance['metadata']['driver_actions'] = 'test_driver_actions'
+
self.driver._add_driver_fields(node, instance, image_meta, flavor)
+
expected_patch = [{'path': '/instance_info/image_source', 'op': 'add',
'value': image_meta.id},
{'path': '/instance_info/root_gb', 'op': 'add',
@@ -953,7 +1003,14 @@ class IronicDriverTestCase(test.NoDBTestCase):
{'path': '/instance_info/local_gb', 'op': 'add',
'value': str(node.properties.get('local_gb', 0))},
{'path': '/instance_uuid', 'op': 'add',
- 'value': instance.uuid}]
+ 'value': instance.uuid},
+ {'path': '/instance_info/deploy_config_options',
+ 'op': 'add',
+ 'value': {'foo': 'bar123'}},
+ {'path': '/instance_info/driver_actions',
+ 'op': 'add',
+ 'value': 'test_driver_actions'},
+ ]
if mock_call is not None:
# assert call() is invoked with retry_on_conflict False to
@@ -976,14 +1033,82 @@ class IronicDriverTestCase(test.NoDBTestCase):
def test__add_driver_fields_fail(self, mock_update):
mock_update.side_effect = ironic_exception.BadRequest()
node = ironic_utils.get_test_node(driver='fake')
- instance = fake_instance.fake_instance_obj(self.ctx,
- node=node.uuid)
+ instance = fake_instance.fake_instance_obj(
+ self.ctx,
+ node=node.uuid,
+ expected_attrs=('metadata',))
image_meta = ironic_utils.get_test_image_meta()
flavor = ironic_utils.get_test_flavor()
self.assertRaises(exception.InstanceDeployFailure,
self.driver._add_driver_fields,
node, instance, image_meta, flavor)
+ def test__get_deploy_config_options_all_present(self):
+ node = ironic_utils.get_test_node(
+ driver='fake', driver_info={'deploy_config': "node-conf"})
+ image_meta = ironic_utils.get_test_image_meta(
+ deploy_config="image-conf")
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, node=node.uuid, expected_attrs=('metadata',),
+ metadata={'deploy_config': "instance-conf"})
+
+ opts = self.driver._get_deploy_config_options(node, instance,
+ image_meta)
+
+ expected = {"node": "node-conf",
+ "image": "image-conf",
+ "instance": "instance-conf"
+ }
+ self.assertEqual(expected, opts)
+
+ def test__get_deploy_config_options_on_node_rebuild(self):
+ # In case of rebuild a set of options is also present in the node
+ # already. We take them, override with the new ones, and pass back.
+ node = ironic_utils.get_test_node(
+ driver='fake', driver_info={'deploy_config': "node-conf"},
+ instance_info={"deploy_config_options": {
+ "instance": "previous_inst_conf",
+ "image": "previous_image_conf",
+ }}
+ )
+ image_meta = ironic_utils.get_test_image_meta(
+ deploy_config="image-conf")
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, node=node.uuid, expected_attrs=('metadata',))
+ opts = self.driver._get_deploy_config_options(node, instance,
+ image_meta)
+
+ expected = {"node": "node-conf",
+ "image": "image-conf",
+ "instance": "previous_inst_conf"
+ }
+ self.assertEqual(expected, opts)
+
+ def test__get_deploy_config_options_some_present(self):
+ node = ironic_utils.get_test_node(driver='fake')
+ image_meta = ironic_utils.get_test_image_meta()
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, node=node.uuid, expected_attrs=('metadata',),
+ metadata={'deploy_config': "instance-conf"})
+
+ opts = self.driver._get_deploy_config_options(node, instance,
+ image_meta)
+
+ expected = {"instance": "instance-conf"}
+ self.assertEqual(expected, opts)
+
+ def test__get_deploy_config_options_none_present(self):
+ node = ironic_utils.get_test_node(driver='fake')
+ image_meta = ironic_utils.get_test_image_meta()
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, node=node.uuid, expected_attrs=('metadata',))
+
+ opts = self.driver._get_deploy_config_options(node, instance,
+ image_meta)
+
+ expected = {}
+ self.assertEqual(expected, opts)
+
def _test_remove_driver_fields(self, mock_update):
node = ironic_utils.get_test_node(driver='fake')
instance = fake_instance.fake_instance_obj(self.ctx,
@@ -1010,7 +1135,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor()
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, node=node_uuid, expected_attrs=('metadata',))
instance.flavor = flavor
mock_node.validate.return_value = ironic_utils.get_test_validation(
@@ -1036,7 +1162,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor()
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, node=node_uuid, expected_attrs=('metadata',))
instance.flavor = flavor
mock_node.get.return_value = node
mock_node.validate.return_value = ironic_utils.get_test_validation()
@@ -1068,7 +1195,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor()
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, node=node_uuid, expected_attrs=('metadata',))
instance.flavor = flavor
mock_node.get.return_value = node
mock_node.validate.return_value = ironic_utils.get_test_validation()
@@ -1100,7 +1228,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor()
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, node=node_uuid, expected_attrs=('metadata',))
instance.flavor = flavor
image_meta = ironic_utils.get_test_image_meta()
@@ -1128,7 +1257,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor()
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, node=node_uuid, expected_attrs=('metadata',))
instance.flavor = flavor
image_meta = ironic_utils.get_test_image_meta()
@@ -1158,7 +1288,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor()
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, node=node_uuid, expected_attrs=('metadata',))
instance.flavor = flavor
image_meta = ironic_utils.get_test_image_meta()
@@ -1190,7 +1321,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor(ephemeral_gb=1)
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, node=node_uuid, expected_attrs=('metadata',))
instance.flavor = flavor
mock_node.get_by_instance_uuid.return_value = node
mock_node.set_provision_state.return_value = mock.MagicMock()
@@ -1584,15 +1716,16 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.driver.refresh_instance_security_rules(fake_group)
mock_risr.assert_called_once_with(fake_group)
+ @mock.patch.object(ironic_driver.IronicDriver, '_get_switch_boot_options')
@mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
@mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
- @mock.patch.object(FAKE_CLIENT.node, 'get')
+ @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
@mock.patch.object(objects.Instance, 'save')
def _test_rebuild(self, mock_save, mock_get, mock_driver_fields,
mock_set_pstate, mock_looping, mock_wait_active,
- preserve=False):
+ mock_sb_options, preserve=False):
node_uuid = uuidutils.generate_uuid()
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=self.instance_uuid,
@@ -1603,10 +1736,12 @@ class IronicDriverTestCase(test.NoDBTestCase):
flavor_id = 5
flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
- instance = fake_instance.fake_instance_obj(self.ctx,
- uuid=self.instance_uuid,
- node=node_uuid,
- instance_type_id=flavor_id)
+ instance = fake_instance.fake_instance_obj(
+ self.ctx,
+ uuid=self.instance_uuid,
+ node=node_uuid,
+ instance_type_id=flavor_id,
+ expected_attrs=('metadata',))
instance.flavor = flavor
fake_looping_call = FakeLoopingCall()
@@ -1630,6 +1765,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
fake_looping_call.start.assert_called_once_with(
interval=CONF.ironic.api_retry_interval)
fake_looping_call.wait.assert_called_once_with()
+ mock_sb_options.assert_called_once_with(self.ctx, instance, node_uuid)
def test_rebuild_preserve_ephemeral(self):
self._test_rebuild(preserve=True)
@@ -1639,7 +1775,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
@mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
@mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
- @mock.patch.object(FAKE_CLIENT.node, 'get')
+ @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
@mock.patch.object(objects.Instance, 'save')
def test_rebuild_failures(self, mock_save, mock_get, mock_driver_fields,
mock_set_pstate):
@@ -1653,10 +1789,12 @@ class IronicDriverTestCase(test.NoDBTestCase):
flavor_id = 5
flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
- instance = fake_instance.fake_instance_obj(self.ctx,
- uuid=self.instance_uuid,
- node=node_uuid,
- instance_type_id=flavor_id)
+ instance = fake_instance.fake_instance_obj(
+ self.ctx,
+ uuid=self.instance_uuid,
+ node=node_uuid,
+ instance_type_id=flavor_id,
+ expected_attrs=('metadata',))
instance.flavor = flavor
exceptions = [
@@ -1672,6 +1810,316 @@ class IronicDriverTestCase(test.NoDBTestCase):
injected_files=None, admin_password=None, bdms=None,
detach_block_devices=None, attach_block_devices=None)
+ @mock.patch.object(ironic_driver.IronicDriver, '_do_rebuild')
+ @mock.patch.object(ironic_driver.IronicDriver, '_get_switch_boot_options')
+ @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
+ @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
+ @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
+ @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
+ @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
+ @mock.patch.object(objects.Instance, 'save')
+ def test_rebuild_multiboot_force_rebuild(self, mock_save, mock_get,
+ mock_driver_fields,
+ mock_set_pstate, mock_looping,
+ mock_wait_active,
+ mock_sb_options, rebuild_mock):
+ node_uuid = uuidutils.generate_uuid()
+ node = ironic_utils.get_test_node(uuid=node_uuid,
+ instance_uuid=self.instance_uuid,
+ instance_type_id=5,
+ instance_info={'multiboot': True})
+ mock_get.return_value = node
+
+ image_meta = ironic_utils.get_test_image_meta()
+
+ flavor_id = 5
+ flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
+
+ instance = fake_instance.fake_instance_obj(
+ self.ctx,
+ uuid=self.instance_uuid,
+ node=node_uuid,
+ instance_type_id=flavor_id,
+ expected_attrs=('metadata',),
+ metadata={'force_rebuild': True})
+ instance.flavor = flavor
+
+ fake_looping_call = FakeLoopingCall()
+ mock_looping.return_value = fake_looping_call
+
+ with mock.patch.object(objects.ImageMeta,
+ 'from_dict') as mock_image_meta_from_dict:
+ mock_image_meta_from_dict.return_value = image_meta
+ self.driver.rebuild(
+ context=self.ctx, instance=instance, image_meta=image_meta,
+ injected_files=None, admin_password=None, bdms=None,
+ detach_block_devices=None, attach_block_devices=None,
+ preserve_ephemeral=False)
+
+ rebuild_mock.assert_called_once_with(
+ self.ctx, FAKE_CLIENT_WRAPPER, node, instance, image_meta,
+ None,
+ None, None, None,
+ None, network_info=None,
+ recreate=False,
+ block_device_info=None,
+ preserve_ephemeral=False)
+
+ @mock.patch.object(objects.ImageMeta, 'from_dict')
+ @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ @mock.patch.object(ironic_driver.IronicDriver, '_do_switch_boot_device')
+ @mock.patch.object(objects.Instance, 'save')
+ def test_rebuild_multiboot_switch_boot(self, mock_save,
+ mock_sb, mock_get,
+ mock_get_by_instance,
+ mock_image_meta_from_dict):
+ node_uuid = uuidutils.generate_uuid()
+ node = ironic_utils.get_test_node(uuid=node_uuid,
+ instance_uuid=self.instance_uuid,
+ instance_type_id=5,
+ instance_info={'multiboot': True})
+ mock_get.return_value = mock_get_by_instance.return_value = node
+
+ image_meta = ironic_utils.get_test_image_meta()
+ mock_image_meta_from_dict.return_value = image_meta
+
+ flavor_id = 5
+ instance = fake_instance.fake_instance_obj(
+ self.ctx,
+ uuid=self.instance_uuid,
+ node=node_uuid,
+ instance_type_id=flavor_id,
+ expected_attrs=('metadata',))
+
+ self.driver.rebuild(
+ context=self.ctx, instance=instance, image_meta=image_meta,
+ injected_files=None, admin_password=None, bdms=None,
+ detach_block_devices=None, attach_block_devices=None,
+ preserve_ephemeral=False)
+
+ mock_sb.assert_called_once_with(self.ctx, FAKE_CLIENT_WRAPPER, node,
+ instance, image_meta)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'vendor_passthru')
+ @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ @mock.patch.object(objects.Instance, 'save')
+ def test__do_switch_boot_device(self, mock_save, upd_mock,
+ sp_mock, vp_mock):
+ node_uuid = uuidutils.generate_uuid()
+ node = ironic_utils.get_test_node(uuid=node_uuid,
+ instance_uuid=self.instance_uuid,
+ instance_type_id=5,
+ instance_info={'multiboot': True})
+
+ image_meta = ironic_utils.get_test_image_meta()
+ flavor_id = 5
+ instance = fake_instance.fake_instance_obj(
+ self.ctx,
+ uuid=self.instance_uuid,
+ node=node_uuid,
+ instance_type_id=flavor_id,
+ expected_attrs=('metadata',),
+ metadata={'sb_key': 'key1', 'sb_user': 'usr1', 'extras': '123'})
+
+ self.driver._do_switch_boot_device(
+ self.ctx, FAKE_CLIENT_WRAPPER,
+ node, instance, image_meta)
+
+ vp_mock.assert_called_once_with(node_uuid, 'switch_boot',
+ {'image': image_meta.id,
+ 'ssh_user': 'usr1',
+ 'ssh_key': 'key1'})
+ sp_mock.assert_called_once_with(node_uuid, 'reboot')
+ upd_mock.assert_called_once_with(
+ node_uuid, [{'path': '/instance_info/image_source', 'op': 'add',
+ 'value': image_meta.id}])
+
+ @mock.patch.object(FAKE_CLIENT.node, 'vendor_passthru')
+ @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ @mock.patch.object(objects.Instance, 'save')
+ def test__do_switch_boot_device_no_key(self, mock_save, upd_mock,
+ sp_mock, vp_mock):
+ node_uuid = uuidutils.generate_uuid()
+ node = ironic_utils.get_test_node(
+ uuid=node_uuid,
+ instance_uuid=self.instance_uuid,
+ instance_type_id=5,
+ instance_info={'multiboot': True,
+ 'image_source': 'original_image',
+ })
+
+ image_meta = ironic_utils.get_test_image_meta()
+ flavor_id = 5
+ instance = fake_instance.fake_instance_obj(
+ self.ctx,
+ uuid=self.instance_uuid,
+ node=node_uuid,
+ instance_type_id=flavor_id,
+ expected_attrs=('metadata',),
+ metadata={'sb_user': 'usr1'})
+
+ self.driver._do_switch_boot_device(
+ self.ctx, FAKE_CLIENT_WRAPPER,
+ node, instance, image_meta)
+
+ self.assertEqual(instance.image_ref, 'original_image')
+ self.assertIn('Invalid metadata',
+ instance.metadata['switch_boot_error'])
+ mock_save.assert_called_once_with()
+ vp_mock.assert_has_calls([])
+ sp_mock.assert_has_calls([])
+ upd_mock.assert_has_calls([])
+
+ @mock.patch.object(FAKE_CLIENT.node, 'vendor_passthru')
+ @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ @mock.patch.object(objects.Instance, 'save')
+ def test__do_switch_boot_device_not_supported_by_driver(
+ self, mock_save, upd_mock, sp_mock, vp_mock):
+ node_uuid = uuidutils.generate_uuid()
+ node = ironic_utils.get_test_node(
+ uuid=node_uuid,
+ instance_uuid=self.instance_uuid,
+ instance_type_id=5,
+ instance_info={'multiboot': True,
+ 'image_source': 'original_image',
+ })
+
+ image_meta = ironic_utils.get_test_image_meta()
+ flavor_id = 5
+ instance = fake_instance.fake_instance_obj(
+ self.ctx,
+ uuid=self.instance_uuid,
+ node=node_uuid,
+ instance_type_id=flavor_id,
+ expected_attrs=('metadata',),
+ metadata={'sb_key': 'key1', 'sb_user': 'usr1'})
+
+ vp_mock.side_effect = ironic_exception.BadRequest()
+
+ self.driver._do_switch_boot_device(
+ self.ctx, FAKE_CLIENT_WRAPPER,
+ node, instance, image_meta)
+
+ self.assertEqual(instance.image_ref, 'original_image')
+ self.assertIn('Bad Request',
+ instance.metadata['switch_boot_error'])
+ mock_save.assert_called_once_with()
+ sp_mock.assert_has_calls([])
+ upd_mock.assert_has_calls([])
+
+ @mock.patch.object(FAKE_CLIENT.node, 'vendor_passthru')
+ @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ @mock.patch.object(objects.Instance, 'save')
+ def test__do_switch_boot_device_already_in_desired_device(
+ self, mock_save, upd_mock, sp_mock, vp_mock):
+ node_uuid = uuidutils.generate_uuid()
+ node = ironic_utils.get_test_node(
+ uuid=node_uuid,
+ instance_uuid=self.instance_uuid,
+ instance_type_id=5,
+ instance_info={'multiboot': True,
+ 'image_source': 'original_image',
+ })
+
+ image_meta = ironic_utils.get_test_image_meta()
+ flavor_id = 5
+ instance = fake_instance.fake_instance_obj(
+ self.ctx,
+ uuid=self.instance_uuid,
+ node=node_uuid,
+ instance_type_id=flavor_id,
+ expected_attrs=('metadata',),
+ metadata={'sb_key': 'key1', 'sb_user': 'usr1'})
+
+ vp_mock.side_effect = ironic_exception.BadRequest(
+ message="Node 123 Already in desired boot device.")
+
+ self.driver._do_switch_boot_device(
+ self.ctx, FAKE_CLIENT_WRAPPER,
+ node, instance, image_meta)
+
+ mock_save.assert_has_calls([])
+ sp_mock.assert_has_calls([])
+ upd_mock.assert_has_calls([])
+
+ @mock.patch.object(FAKE_CLIENT.node, 'vendor_passthru')
+ @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ @mock.patch.object(objects.Instance, 'save')
+ def test__do_switch_boot_device_reboot_error(
+ self, mock_save, upd_mock, sp_mock, vp_mock):
+ node_uuid = uuidutils.generate_uuid()
+ node = ironic_utils.get_test_node(
+ uuid=node_uuid,
+ instance_uuid=self.instance_uuid,
+ instance_type_id=5,
+ instance_info={'multiboot': True,
+ 'image_source': 'original_image',
+ })
+
+ image_meta = ironic_utils.get_test_image_meta()
+ flavor_id = 5
+ instance = fake_instance.fake_instance_obj(
+ self.ctx,
+ uuid=self.instance_uuid,
+ node=node_uuid,
+ instance_type_id=flavor_id,
+ expected_attrs=('metadata',),
+ metadata={'sb_key': 'key1', 'sb_user': 'usr1'})
+
+ sp_mock.side_effect = ironic_exception.BadRequest()
+
+ self.driver._do_switch_boot_device(
+ self.ctx, FAKE_CLIENT_WRAPPER,
+ node, instance, image_meta)
+
+ self.assertEqual(instance.image_ref, 'original_image')
+ self.assertIn('Bad Request',
+ instance.metadata['switch_boot_error'])
+ mock_save.assert_called_once_with()
+ upd_mock.assert_has_calls([])
+
+ @mock.patch.object(FAKE_CLIENT.node, 'vendor_passthru')
+ @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ @mock.patch.object(objects.Instance, 'save')
+ def test__do_switch_boot_device_update_node_error(
+ self, mock_save, upd_mock, sp_mock, vp_mock):
+ node_uuid = uuidutils.generate_uuid()
+ node = ironic_utils.get_test_node(
+ uuid=node_uuid,
+ instance_uuid=self.instance_uuid,
+ instance_type_id=5,
+ instance_info={'multiboot': True,
+ 'image_source': 'original_image',
+ })
+
+ image_meta = ironic_utils.get_test_image_meta()
+ flavor_id = 5
+ instance = fake_instance.fake_instance_obj(
+ self.ctx,
+ uuid=self.instance_uuid,
+ node=node_uuid,
+ instance_type_id=flavor_id,
+ expected_attrs=('metadata',),
+ metadata={'sb_key': 'key1', 'sb_user': 'usr1'})
+
+ upd_mock.side_effect = ironic_exception.BadRequest()
+
+ self.driver._do_switch_boot_device(
+ self.ctx, FAKE_CLIENT_WRAPPER,
+ node, instance, image_meta)
+
+ self.assertEqual(instance.image_ref, 'original_image')
+ self.assertIn('Bad Request',
+ instance.metadata['switch_boot_error'])
+ mock_save.assert_called_once_with()
+
@mock.patch.object(FAKE_CLIENT.node, 'get')
def _test_network_binding_host_id(self, network_interface, mock_get):
node_uuid = uuidutils.generate_uuid()
diff --git a/nova/tests/unit/virt/ironic/utils.py b/nova/tests/unit/virt/ironic/utils.py
index 9df401e..6cf91ef 100644
--- a/nova/tests/unit/virt/ironic/utils.py
+++ b/nova/tests/unit/virt/ironic/utils.py
@@ -39,7 +39,7 @@ def get_test_node(**kw):
ironic_states.NOSTATE),
'last_error': kw.get('last_error'),
'instance_uuid': kw.get('instance_uuid'),
- 'instance_info': kw.get('instance_info'),
+ 'instance_info': kw.get('instance_info', {}),
'driver': kw.get('driver', 'fake'),
'driver_info': kw.get('driver_info', {}),
'properties': kw.get('properties', {}),
@@ -96,8 +96,13 @@ def get_test_flavor(**kw):
def get_test_image_meta(**kw):
- return objects.ImageMeta.from_dict(
- {'id': kw.get('id', 'cccccccc-cccc-cccc-cccc-cccccccccccc')})
+ return objects.ImageMeta.from_dict({
+ 'id': kw.get('id', 'cccccccc-cccc-cccc-cccc-cccccccccccc'),
+ 'properties': {
+ 'deploy_config': kw.get('deploy_config', ''),
+ 'driver_actions': kw.get('driver_actions', '')
+ }
+ })
class FakePortClient(object):
@@ -135,6 +140,9 @@ class FakeNodeClient(object):
def validate(self, node_uuid):
pass
+ def vendor_passthru(self, node_uuid, method, args):
+ pass
+
class FakeClient(object):
diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py
index 5ef3394..eea17a6 100644
--- a/nova/virt/ironic/driver.py
+++ b/nova/virt/ironic/driver.py
@@ -77,7 +77,7 @@ _UNPROVISION_STATES = (ironic_states.ACTIVE, ironic_states.DEPLOYFAIL,
_NODE_FIELDS = ('uuid', 'power_state', 'target_power_state', 'provision_state',
'target_provision_state', 'last_error', 'maintenance',
- 'properties', 'instance_uuid')
+ 'properties', 'instance_uuid', 'instance_info', 'driver_info')
def map_power_state(state):
@@ -363,6 +363,17 @@ class IronicDriver(virt_driver.ComputeDriver):
# Associate the node with an instance
patch.append({'path': '/instance_uuid', 'op': 'add',
'value': instance.uuid})
+
+ deploy_config_options = self._get_deploy_config_options(
+ node, instance, image_meta)
+ patch.append(
+ {'path': '/instance_info/deploy_config_options', 'op': 'add',
+ 'value': deploy_config_options})
+
+ patch.append(
+ {'path': '/instance_info/driver_actions', 'op': 'add',
+ 'value': instance.metadata.get('driver_actions', '')})
+
try:
# FIXME(lucasagomes): The "retry_on_conflict" parameter was added
# to basically causes the deployment to fail faster in case the
@@ -377,6 +388,12 @@ class IronicDriver(virt_driver.ComputeDriver):
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
+ def _update_driver_fields_after_switch_boot(self, context, node,
+ instance, image_meta):
+ patch = [{'path': '/instance_info/image_source', 'op': 'add',
+ 'value': image_meta.id}]
+ self.ironicclient.call('node.update', node.uuid, patch)
+
def _remove_driver_fields(self, node, instance):
patch = [{'path': '/instance_info', 'op': 'remove'},
{'path': '/instance_uuid', 'op': 'remove'}]
@@ -804,9 +821,19 @@ class IronicDriver(virt_driver.ComputeDriver):
# trigger the node deploy
try:
- self.ironicclient.call("node.set_provision_state", node_uuid,
- ironic_states.ACTIVE,
- configdrive=configdrive_value)
+ # NOTE(lobur): set_provision_state to
+ # ACTIVE, REBUILD, and switch_boot_device are the only Ironic API
+ # calls where the user context needs to be passed to Ironic. This
+ # is needed to be able to fetch tenant-owned resources for
+ # deployment, e.g. deploy_config stored in Swift. The user should
+ # have admin role, otherwise this context will be replaced by a
+ # standard Ironic context (admin tenant). It is also required to
+ # have a standalone instance of ironicclient to make sure
+ # no other calls use user context cached in the client.
+ ironicclient = client_wrapper.IronicClientWrapper()
+ ironicclient.call("node.set_provision_state", node_uuid,
+ ironic_states.ACTIVE,
+ configdrive=configdrive_value)
except Exception as e:
with excutils.save_and_reraise_exception():
msg = (_LE("Failed to request Ironic to provision instance "
@@ -828,6 +855,17 @@ class IronicDriver(virt_driver.ComputeDriver):
"baremetal node %(node)s."),
{'instance': instance.uuid,
'node': node_uuid})
+ else:
+ self._get_switch_boot_options(context, instance, node_uuid)
+
+ def _get_switch_boot_options(self, context, instance, node_uuid):
+ # Reload node to see if multiboot flag appeared.
+ node = self.ironicclient.call("node.get", node_uuid)
+ if node.instance_info.get('multiboot'):
+ multiboot_meta = node.instance_info.get('multiboot_info', {})
+ available_images = [img['image_name'] for img in
+ multiboot_meta.get('elements', [])]
+ instance.metadata['available_images'] = str(available_images)
def _unprovision(self, instance, node):
"""This method is called from destroy() to unprovision
@@ -1166,16 +1204,102 @@ class IronicDriver(virt_driver.ComputeDriver):
instance.task_state = task_states.REBUILD_SPAWNING
instance.save(expected_task_state=[task_states.REBUILDING])
- node_uuid = instance.node
- node = self._get_node(node_uuid)
+ # NOTE(oberezovskyi): Required to get real node uuid assigned to nova
+ # instance. Workaround after
+ # Change-Id: I0233f964d8f294f0ffd9edcb16b1aaf93486177f
+ node = self.ironicclient.call("node.get_by_instance_uuid",
+ instance.uuid)
+
+ # NOTE(lobur): set_provision_state to
+ # ACTIVE, REBUILD, and switch_boot_device are the only Ironic API
+ # calls where the user context needs to be passed to Ironic. This
+ # is needed to be able to fetch tenant-owned resources for
+ # deployment, e.g. deploy_config stored in Swift. The user should
+ # have admin role, otherwise this context will be replaced by a
+ # standard Ironic context (admin tenant). It is also required to
+ # have a standalone instance of ironicclient to make sure
+ # no other calls use user context cached in the client.
+ ironicclient = client_wrapper.IronicClientWrapper()
+
+ # To get a multiboot node rebuilt through the standard flow we
+ # require a separate force_rebuild flag in meta.
+ forced_rebuild = instance.metadata.pop('force_rebuild', False)
+
+ if node.instance_info.get('multiboot') and not forced_rebuild:
+ self._do_switch_boot_device(
+ context, ironicclient, node, instance, image_meta)
+ else:
+ self._do_rebuild(
+ context, ironicclient, node, instance, image_meta,
+ injected_files,
+ admin_password, bdms, detach_block_devices,
+ attach_block_devices, network_info=network_info,
+ recreate=recreate,
+ block_device_info=block_device_info,
+ preserve_ephemeral=preserve_ephemeral)
- self._add_driver_fields(node, instance, image_meta, instance.flavor,
- preserve_ephemeral)
+ self._get_switch_boot_options(context, instance, node.uuid)
- # Trigger the node rebuild/redeploy.
+ def _do_switch_boot_device(self, context, ironicclient, node, instance,
+ image_meta):
+ old_image_ref = node.instance_info.get("image_source", "")
try:
- self.ironicclient.call("node.set_provision_state",
- node_uuid, ironic_states.REBUILD)
+ sb_user, sb_key = self._get_switch_boot_user_key(instance.metadata)
+ args = dict(ssh_user=sb_user,
+ ssh_key=sb_key,
+ image=image_meta.id)
+ ironicclient.call("node.vendor_passthru",
+ node.uuid, "switch_boot",
+ args)
+ self.ironicclient.call("node.set_power_state", node.uuid, 'reboot')
+ self._update_driver_fields_after_switch_boot(
+ context, node, instance, image_meta)
+ except (exception.InvalidMetadata, # Bad Nova API call
+ exception.NovaException, # Retry failed
+ ironic.exc.InternalServerError, # Validations
+ ironic.exc.BadRequest) as e: # Maintenance or no such API
+ # Ironic Vendor API always return 200/400/500, so the only way
+ # to check the error is introspecting its message.
+ if "Already in desired boot device" in six.text_type(e):
+ msg = (_("Ironic node %(node)s already has desired "
+ "boot device set.") % {'node': node.uuid})
+ LOG.warning(msg)
+ else:
+ # Rollback nova image ref
+ instance.image_ref = old_image_ref
+ # Cutting error msg to fit DB table.
+ instance.metadata['switch_boot_error'] = six.text_type(e)[:255]
+ instance.save()
+ msg = (_("Failed to switch Ironic %(node)s node boot "
+ "device: %(err)s")
+ % {'node': node.uuid, 'err': six.text_type(e)})
+ LOG.error(msg)
+
+ def _get_switch_boot_user_key(self, metadata):
+ sb_user = metadata.pop('sb_user', None)
+ sb_key = metadata.pop('sb_key', None)
+ if sb_user and sb_key:
+ return sb_user, sb_key
+ else:
+ raise exception.InvalidMetadata(
+ reason="To trigger switch boot device flow, both 'sb_user' "
+ "and 'sb_key' metadata params are required. To "
+ "trigger a standard rebuild flow, use "
+ "force_rebuild=True metadata flag.")
+
+ def _do_rebuild(self, context, ironicclient, node, instance, image_meta,
+ injected_files,
+ admin_password, bdms, detach_block_devices,
+ attach_block_devices, network_info=None,
+ recreate=False, block_device_info=None,
+ preserve_ephemeral=False):
+
+ self._add_driver_fields(node, instance, image_meta,
+ instance.flavor, preserve_ephemeral)
+ try:
+
+ ironicclient.call("node.set_provision_state",
+ node.uuid, ironic_states.REBUILD)
except (exception.NovaException, # Retry failed
ironic.exc.InternalServerError, # Validations
ironic.exc.BadRequest) as e: # Maintenance
@@ -1191,6 +1315,25 @@ class IronicDriver(virt_driver.ComputeDriver):
timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info(_LI('Instance was successfully rebuilt'), instance=instance)
+ def _get_deploy_config_options(self, node, instance, image_meta):
+ # Taking into account previous options, if any. This is to support
+ # rebuild flow where the user might or might not pass deploy_config
+ # reference. If no reference was passed, we'll take the option used for
+ # initial deployment.
+ res = node.instance_info.get('deploy_config_options', {})
+
+ curr_options = {
+ 'image': image_meta.properties.get('deploy_config', ''),
+ 'instance': instance.metadata.get('deploy_config', ''),
+ 'node': node.driver_info.get('deploy_config', ''),
+ }
+ # Filter out empty ones
+ curr_options = {key: value for key, value in
+ curr_options.items() if value}
+ # Override previous by current.
+ res.update(curr_options)
+ return res
+
def network_binding_host_id(self, context, instance):
"""Get host ID to associate with network ports.
--
2.7.4
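
In the nova patch, _get_deploy_config_options() resolves the deploy config from three sources (image properties, instance metadata, node driver_info) on top of whatever options the node already carries from a previous deployment, dropping empty values. A minimal sketch of that merge, detached from nova objects and using the same sample values as test__get_deploy_config_options_on_node_rebuild above:

    # Options recorded on the node during the initial deployment.
    previous = {'instance': 'previous_inst_conf', 'image': 'previous_image_conf'}

    # Freshly supplied options: image property and node driver_info are set,
    # the instance passed no deploy_config this time (empty string).
    current = {'image': 'image-conf', 'instance': '', 'node': 'node-conf'}

    merged = dict(previous)
    merged.update({k: v for k, v in current.items() if v})  # empty values ignored

    assert merged == {'instance': 'previous_inst_conf',
                      'image': 'image-conf',
                      'node': 'node-conf'}

The rebuild path, by contrast, is selected purely from instance metadata: 'sb_user' and 'sb_key' trigger the switch_boot vendor passthru on multiboot nodes, while 'force_rebuild': True bypasses it and forces the standard REBUILD provision-state flow.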