Wire in storage interface attach/detach operations

Addition of storage interface attachment and detachment
operations when:

* Node power is turned on or off while a storage_interface is
  configured and the node is in the ACTIVE state.
* Node deployment and node tear_down operations.

In addition to attachment and detachment, driver_internal_info
is now populated with a boot from volume target uuid, if a
volume is defined for the node.

Additionally, upon tear_down, the drivers now call a helper
to remove storage-related driver_internal_info entries and
destroy volume target records.

The "cinder" storage interface has been enabled by default,
and further details on the storage interface's use are in
later patchsets for this feature.

Authored-By: Julia Kreger <juliaashleykreger@gmail.com>
Co-Authored-By: Joanna Taryma <joanna.taryma@intel.com>
Co-Authored-By: Michael Turek <mjturek@linux.vnet.ibm.com>
Change-Id: I0e22312e8cebb37b8f025da2baeca8eb635f35b7
Partial-Bug: #1559691
This commit is contained in:
Joanna Taryma 2017-04-10 15:46:52 -07:00 committed by Julia Kreger
parent 870d909cc7
commit 104000549e
9 changed files with 456 additions and 12 deletions

View File

@ -79,6 +79,8 @@ def node_power_action(task, new_state, timeout=None):
power state. ``None`` indicates to use default timeout.
:raises: InvalidParameterValue when the wrong state is specified
or the wrong driver info is specified.
:raises: StorageError when a failure occurs updating the node's
storage interface upon setting power on.
:raises: other exceptions by the node's power driver if something
wrong occurred during the power action.
@ -154,6 +156,10 @@ def node_power_action(task, new_state, timeout=None):
# take power action
try:
if (target_state == states.POWER_ON and
node.provision_state == states.ACTIVE):
task.driver.storage.attach_volumes(task)
if new_state != states.REBOOT:
if ('timeout' in reflection.get_signature(
task.driver.power.set_power_state).parameters):
@ -168,7 +174,11 @@ def node_power_action(task, new_state, timeout=None):
"doesn't support 'timeout' parameter.",
{'driver_name': node.driver})
task.driver.power.set_power_state(task, new_state)
else:
# TODO(TheJulia): We likely ought to consider toggling
# volume attachments, although we have no mechanism to
# really verify what cinder has connector wise.
if ('timeout' in reflection.get_signature(
task.driver.power.reboot).parameters):
task.driver.power.reboot(task, timeout=timeout)
@ -203,6 +213,16 @@ def node_power_action(task, new_state, timeout=None):
{'node': node.uuid,
'target_state': target_state,
'new_state': new_state})
# NOTE(TheJulia): Similarly to power-on, when we power-off
# a node, we should detach any volume attachments.
if (target_state == states.POWER_OFF and
node.provision_state == states.ACTIVE):
try:
task.driver.storage.detach_volumes(task)
except exception.StorageError as e:
LOG.warning("Volume detachment for node %(node)s "
"failed. Error: %(error)s",
{'node': node.uuid, 'error': e})
@task_manager.require_exclusive_lock

View File

@ -135,7 +135,7 @@ driver_opts = [
cfg.StrOpt('default_raid_interface',
help=_DEFAULT_IFACE_HELP.format('raid')),
cfg.ListOpt('enabled_storage_interfaces',
default=['noop'],
default=['cinder', 'noop'],
help=_ENABLED_IFACE_HELP.format('storage')),
cfg.StrOpt('default_storage_interface',
help=_DEFAULT_IFACE_HELP.format('storage')),

View File

@ -331,6 +331,9 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface):
node = task.node
params = {}
# TODO(jtaryma): Skip validation of image_source if
# task.driver.storage.should_write_image()
# returns False.
image_source = node.instance_info.get('image_source')
params['instance_info.image_source'] = image_source
error_msg = _('Node %s failed to validate deploy image info. Some '
@ -386,11 +389,14 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface):
:raises: NetworkError if the cleaning ports cannot be removed.
:raises: InvalidParameterValue when the wrong power state is specified
or the wrong driver info is specified for power management.
:raises: StorageError when the storage interface attached volumes fail
to detach.
:raises: other exceptions by the node's power driver if something
wrong occurred during the power action.
"""
manager_utils.node_power_action(task, states.POWER_OFF)
task.driver.storage.detach_volumes(task)
deploy_utils.tear_down_storage_configuration(task)
task.driver.network.unconfigure_tenant_networks(task)
return states.DELETED
@ -404,14 +410,17 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface):
:raises: NetworkError: if the previous cleaning ports cannot be removed
or if new cleaning ports cannot be created.
:raises: InvalidParameterValue when the wrong power state is specified
or the wrong driver info is specified for power management.
or the wrong driver info is specified for power management.
:raises: StorageError If the storage driver is unable to attach the
configured volumes.
:raises: other exceptions by the node's power driver if something
wrong occurred during the power action.
wrong occurred during the power action.
:raises: exception.ImageRefValidationFailed if image_source is not
Glance href and is not HTTP(S) URL.
:raises: any boot interface's prepare_ramdisk exceptions.
"""
node = task.node
deploy_utils.populate_storage_driver_internal_info(task)
if node.provision_state == states.DEPLOYING:
# Adding the node to provisioning network so that the dhcp
# options get added for the provisioning port.
@ -420,6 +429,8 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface):
# configured, unbind tenant ports if present
task.driver.network.unconfigure_tenant_networks(task)
task.driver.network.add_provisioning_network(task)
# Signal to storage driver to attach volumes
task.driver.storage.attach_volumes(task)
if node.provision_state == states.ACTIVE:
task.driver.boot.prepare_instance(task)
elif node.provision_state != states.ADOPTING:

View File

@ -1204,3 +1204,96 @@ def build_instance_info_for_deploy(task):
else:
instance_info['image_type'] = 'whole-disk-image'
return instance_info
def check_interface_capability(interface, capability):
    """Check whether an interface advertises a given capability.

    :param interface: The interface object to check.
    :param capability: The capability value to look for.
    :returns: True if capability found, otherwise False.
    """
    # Interfaces without a 'capabilities' attribute support nothing.
    supported = getattr(interface, 'capabilities', [])
    return capability in supported
def get_remote_boot_volume(task):
    """Identify a boot volume from any configured volumes.

    :param task: a TaskManager object containing the node.
    :returns: the volume target with boot_index 0, or None when no
        configured volume target is marked as the boot volume.
    """
    # Boot index 0 designates the boot volume by convention.
    return next((target for target in task.volume_targets
                 if target['boot_index'] == 0), None)
def populate_storage_driver_internal_info(task):
    """Set node driver_internal_info for boot from volume parameters.

    :param task: a TaskManager object containing the node.
    :raises StorageError when a node has an iSCSI or FibreChannel boot volume
        defined but is not capable to support it.
    """
    node = task.node
    boot_volume = get_remote_boot_volume(task)
    if not boot_volume:
        # Nothing to record when no boot volume is configured.
        return

    vol_type = str(boot_volume.volume_type).lower()
    node_caps = driver_utils.capabilities_to_dict(
        node.properties.get('capabilities'))
    # TODO(TheJulia): In order to support the FCoE and HBA boot cases,
    # some additional logic will be needed here to ensure we align.
    # The deployment, in theory, should never reach this point
    # if the interfaces all validated, but we shouldn't use that
    # as the only guard against bad configurations.
    if vol_type == 'iscsi' and 'iscsi_boot' not in node_caps:
        raise exception.StorageError(_('Node %(node)s has an iSCSI boot '
                                       'volume defined and no iSCSI boot '
                                       'support available.') %
                                     {'node': node.uuid})
    if vol_type == 'fibre_channel' and 'fibre_channel_boot' not in node_caps:
        raise exception.StorageError(_('Node %(node)s has a Fibre Channel '
                                       'boot volume defined and no Fibre '
                                       'Channel boot support available.') %
                                     {'node': node.uuid})

    vol_uuid = boot_volume['uuid']
    info = node.driver_internal_info
    if check_interface_capability(task.driver.boot,
                                  '%s_volume_boot' % vol_type):
        info['boot_from_volume'] = vol_uuid
    # NOTE(TheJulia): This would be a convenient place to check
    # if we need to know about deploying the volume.
    if (check_interface_capability(task.driver.deploy,
                                   '%s_volume_deploy' % vol_type)
            and task.driver.storage.should_write_image(task)):
        info['boot_from_volume_deploy'] = vol_uuid
        # NOTE(TheJulia): This is also a useful place to include a
        # root device hint since we should/might/be able to obtain
        # and supply that information to IPA if it needs to write
        # the image to the volume.
    node.driver_internal_info = info
    node.save()
def tear_down_storage_configuration(task):
    """Clean up storage configuration.

    Remove entries from driver_internal_info for storage and
    deletes the volume targets from the database. This is done
    to ensure a clean state for the next boot of the machine.

    :param task: a TaskManager object containing the node.
    """
    # TODO(mjturek): TheJulia mentioned that this should
    # possibly be configurable for the standalone case. However,
    # this is dangerous if IPA is not handling the cleaning.
    for target in task.volume_targets:
        target.destroy()

    node = task.node
    info = node.driver_internal_info
    for key in ('boot_from_volume', 'boot_from_volume_deploy'):
        info.pop(key, None)
    node.driver_internal_info = info
    node.save()

View File

@ -463,10 +463,13 @@ class ISCSIDeploy(AgentDeployMixin, base.DeployInterface):
:raises: NetworkError if the cleaning ports cannot be removed.
:raises: InvalidParameterValue when the wrong state is specified
or the wrong driver info is specified.
:raises: StorageError when volume detachment fails.
:raises: other exceptions by the node's power driver if something
wrong occurred during the power action.
"""
manager_utils.node_power_action(task, states.POWER_OFF)
task.driver.storage.detach_volumes(task)
deploy_utils.tear_down_storage_configuration(task)
task.driver.network.unconfigure_tenant_networks(task)
return states.DELETED
@ -483,12 +486,15 @@ class ISCSIDeploy(AgentDeployMixin, base.DeployInterface):
:raises: NetworkError: if the previous cleaning ports cannot be removed
or if new cleaning ports cannot be created.
:raises: InvalidParameterValue when the wrong power state is specified
or the wrong driver info is specified for power management.
or the wrong driver info is specified for power management.
:raises: StorageError If the storage driver is unable to attach the
configured volumes.
:raises: other exceptions by the node's power driver if something
wrong occurred during the power action.
wrong occurred during the power action.
:raises: any boot interface's prepare_ramdisk exceptions.
"""
node = task.node
deploy_utils.populate_storage_driver_internal_info(task)
if node.provision_state in [states.ACTIVE, states.ADOPTING]:
task.driver.boot.prepare_instance(task)
else:
@ -500,6 +506,7 @@ class ISCSIDeploy(AgentDeployMixin, base.DeployInterface):
# already configured, unbind tenant ports if present
task.driver.network.unconfigure_tenant_networks(task)
task.driver.network.add_provisioning_network(task)
task.driver.storage.attach_volumes(task)
deploy_opts = deploy_utils.build_agent_options(node)
task.driver.boot.prepare_ramdisk(task, deploy_opts)

View File

@ -494,6 +494,69 @@ class NodePowerActionTestCase(base.DbTestCase):
'baremetal.node.power_set.error',
obj_fields.NotificationLevel.ERROR)
def test_node_power_action_power_on_storage_attach(self):
    """Test node_power_action to turn node power on and attach storage."""
    # Attach is expected only because the node is ACTIVE with a
    # cinder storage interface and the target state is power-on.
    node = obj_utils.create_test_node(self.context,
                                      uuid=uuidutils.generate_uuid(),
                                      driver='fake',
                                      power_state=states.POWER_OFF,
                                      storage_interface="cinder",
                                      provision_state=states.ACTIVE)
    task = task_manager.TaskManager(self.context, node.uuid)

    with mock.patch.object(task.driver.storage,
                           'attach_volumes',
                           autospec=True) as attach_mock:
        conductor_utils.node_power_action(task, states.POWER_ON)

        node.refresh()
        attach_mock.assert_called_once_with(task)
        self.assertEqual(states.POWER_ON, node['power_state'])
        self.assertIsNone(node['target_power_state'])
        self.assertIsNone(node['last_error'])
def test_node_power_action_reboot_storage_attach(self):
    """Test node_power_action to reboot the node and attach storage."""
    # A reboot resolves to a power-on target state, so the attach
    # call is still expected for an ACTIVE node.
    node = obj_utils.create_test_node(self.context,
                                      uuid=uuidutils.generate_uuid(),
                                      driver='fake',
                                      power_state=states.POWER_ON,
                                      storage_interface="cinder",
                                      provision_state=states.ACTIVE)
    task = task_manager.TaskManager(self.context, node.uuid)

    with mock.patch.object(task.driver.storage,
                           'attach_volumes',
                           autospec=True) as attach_mock:
        conductor_utils.node_power_action(task, states.REBOOT)

        node.refresh()
        attach_mock.assert_called_once_with(task)
        self.assertEqual(states.POWER_ON, node['power_state'])
        self.assertIsNone(node['target_power_state'])
        self.assertIsNone(node['last_error'])
def test_node_power_action_power_off_storage_detach(self):
    """Test node_power_action to turn node power off and detach storage."""
    # Detach is expected only because the node is ACTIVE with a
    # cinder storage interface and the target state is power-off.
    node = obj_utils.create_test_node(self.context,
                                      uuid=uuidutils.generate_uuid(),
                                      driver='fake',
                                      power_state=states.POWER_ON,
                                      storage_interface="cinder",
                                      provision_state=states.ACTIVE)
    task = task_manager.TaskManager(self.context, node.uuid)

    with mock.patch.object(task.driver.storage,
                           'detach_volumes',
                           autospec=True) as detach_mock:
        conductor_utils.node_power_action(task, states.POWER_OFF)

        node.refresh()
        detach_mock.assert_called_once_with(task)
        self.assertEqual(states.POWER_OFF, node['power_state'])
        self.assertIsNone(node['target_power_state'])
        self.assertIsNone(node['last_error'])
class NodeSoftPowerActionTestCase(base.DbTestCase):
@ -584,6 +647,27 @@ class NodeSoftPowerActionTestCase(base.DbTestCase):
self.assertIsNone(node['target_power_state'])
self.assertIsNone(node['last_error'])
def test_node_power_action_soft_power_off_storage_detach(self):
    """Test node_power_action to soft power off node and detach storage."""
    # Soft power off also resolves to a POWER_OFF target state, so
    # volume detachment is expected for the ACTIVE node.
    node = obj_utils.create_test_node(self.context,
                                      uuid=uuidutils.generate_uuid(),
                                      driver='fake_soft_power',
                                      power_state=states.POWER_ON,
                                      storage_interface="cinder",
                                      provision_state=states.ACTIVE)
    task = task_manager.TaskManager(self.context, node.uuid)

    with mock.patch.object(task.driver.storage,
                           'detach_volumes',
                           autospec=True) as detach_mock:
        conductor_utils.node_power_action(task, states.SOFT_POWER_OFF)

        node.refresh()
        detach_mock.assert_called_once_with(task)
        self.assertEqual(states.POWER_OFF, node['power_state'])
        self.assertIsNone(node['target_power_state'])
        self.assertIsNone(node['last_error'])
class CleanupAfterTimeoutTestCase(tests_base.TestCase):
def setUp(self):

View File

@ -30,6 +30,7 @@ from ironic.drivers.modules import agent_client
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import fake
from ironic.drivers.modules import pxe
from ironic.drivers.modules.storage import noop as noop_storage
from ironic.drivers import utils as driver_utils
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
@ -144,11 +145,16 @@ class TestAgentDeploy(db_base.DbTestCase):
super(TestAgentDeploy, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_agent')
self.driver = agent.AgentDeploy()
# NOTE(TheJulia): We explicitly set the noop storage interface as the
# default below for deployment tests in order to raise any change
# in the default which could be a breaking behavior change
# as the storage interface is explicitly an "opt-in" interface.
n = {
'driver': 'fake_agent',
'instance_info': INSTANCE_INFO,
'driver_info': DRIVER_INFO,
'driver_internal_info': DRIVER_INTERNAL_INFO,
'storage_interface': 'noop',
}
self.node = object_utils.create_test_node(self.context, **n)
self.ports = [
@ -258,10 +264,16 @@ class TestAgentDeploy(db_base.DbTestCase):
self.assertEqual(driver_return, states.DEPLOYWAIT)
power_mock.assert_called_once_with(task, states.REBOOT)
@mock.patch.object(noop_storage.NoopStorage, 'detach_volumes',
autospec=True)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
'unconfigure_tenant_networks', autospec=True)
@mock.patch('ironic.conductor.utils.node_power_action', autospec=True)
def test_tear_down(self, power_mock, unconfigure_tenant_nets_mock):
def test_tear_down(self, power_mock,
unconfigure_tenant_nets_mock,
storage_detach_volumes_mock):
object_utils.create_test_volume_target(
self.context, node_id=self.node.id)
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
driver_return = self.driver.tear_down(task)
@ -269,7 +281,16 @@ class TestAgentDeploy(db_base.DbTestCase):
self.assertEqual(driver_return, states.DELETED)
unconfigure_tenant_nets_mock.assert_called_once_with(mock.ANY,
task)
storage_detach_volumes_mock.assert_called_once_with(
task.driver.storage, task)
# Verify no volumes exist for new task instances.
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.assertEqual(0, len(task.volume_targets))
@mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
autospec=True)
@mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info')
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
@mock.patch.object(deploy_utils, 'build_agent_options')
@mock.patch.object(deploy_utils, 'build_instance_info_for_deploy')
@ -280,7 +301,8 @@ class TestAgentDeploy(db_base.DbTestCase):
def test_prepare(
self, unconfigure_tenant_net_mock, add_provisioning_net_mock,
build_instance_info_mock, build_options_mock,
pxe_prepare_ramdisk_mock):
pxe_prepare_ramdisk_mock, storage_driver_info_mock,
storage_attach_volumes_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.node.provision_state = states.DEPLOYING
@ -295,6 +317,9 @@ class TestAgentDeploy(db_base.DbTestCase):
task, {'a': 'b'})
add_provisioning_net_mock.assert_called_once_with(mock.ANY, task)
unconfigure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
storage_driver_info_mock.assert_called_once_with(task)
storage_attach_volumes_mock.assert_called_once_with(
task.driver.storage, task)
self.node.refresh()
self.assertEqual('bar', self.node.instance_info['foo'])
@ -320,6 +345,9 @@ class TestAgentDeploy(db_base.DbTestCase):
self.node.refresh()
self.assertEqual('bar', self.node.instance_info['foo'])
@mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
autospec=True)
@mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info')
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
'add_provisioning_network', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance')
@ -329,7 +357,8 @@ class TestAgentDeploy(db_base.DbTestCase):
def test_prepare_active(
self, build_instance_info_mock, build_options_mock,
pxe_prepare_ramdisk_mock, pxe_prepare_instance_mock,
add_provisioning_net_mock):
add_provisioning_net_mock, storage_driver_info_mock,
storage_attach_volumes_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.node.provision_state = states.ACTIVE
@ -341,6 +370,8 @@ class TestAgentDeploy(db_base.DbTestCase):
self.assertFalse(pxe_prepare_ramdisk_mock.called)
self.assertTrue(pxe_prepare_instance_mock.called)
self.assertFalse(add_provisioning_net_mock.called)
self.assertTrue(storage_driver_info_mock.called)
self.assertFalse(storage_attach_volumes_mock.called)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
'add_provisioning_network', autospec=True)

View File

@ -36,8 +36,10 @@ from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import deploy_utils as utils
from ironic.drivers.modules import fake
from ironic.drivers.modules import image_cache
from ironic.drivers.modules import pxe
from ironic.drivers.modules.storage import cinder
from ironic.drivers import utils as driver_utils
from ironic.tests import base as tests_base
from ironic.tests.unit.conductor import mgr_utils
@ -2335,3 +2337,166 @@ class TestBuildInstanceInfoForDeploy(db_base.DbTestCase):
self.assertRaises(exception.ImageRefValidationFailed,
utils.build_instance_info_for_deploy, task)
class TestStorageInterfaceUtils(db_base.DbTestCase):
    """Tests for the storage helper functions in deploy_utils."""

    def setUp(self):
        super(TestStorageInterfaceUtils, self).setUp()
        self.node = obj_utils.create_test_node(self.context,
                                               driver='fake')

    def test_check_interface_capability(self):
        """Capability lookup honors the interface's capabilities list."""
        class fake_driver(object):
            capabilities = ['foo', 'bar']

        self.assertTrue(utils.check_interface_capability(fake_driver, 'foo'))
        self.assertFalse(utils.check_interface_capability(fake_driver, 'baz'))

    def test_get_remote_boot_volume(self):
        """The volume target with boot_index 0 is the boot volume."""
        obj_utils.create_test_volume_target(
            self.context, node_id=self.node.id, volume_type='iscsi',
            boot_index=1, volume_id='4321')
        obj_utils.create_test_volume_target(
            self.context, node_id=self.node.id, volume_type='iscsi',
            boot_index=0, volume_id='1234', uuid=uuidutils.generate_uuid())
        self.node.storage_interface = 'cinder'
        self.node.save()

        with task_manager.acquire(
                self.context, self.node.uuid, shared=False) as task:
            volume = utils.get_remote_boot_volume(task)
            self.assertEqual('1234', volume['volume_id'])

    def test_get_remote_boot_volume_none(self):
        """None is returned with no targets or no boot_index 0 target."""
        with task_manager.acquire(
                self.context, self.node.uuid, shared=False) as task:
            self.assertIsNone(utils.get_remote_boot_volume(task))
        obj_utils.create_test_volume_target(
            self.context, node_id=self.node.id, volume_type='iscsi',
            boot_index=1, volume_id='4321')
        with task_manager.acquire(
                self.context, self.node.uuid, shared=False) as task:
            self.assertIsNone(utils.get_remote_boot_volume(task))

    @mock.patch.object(fake, 'FakeBoot', autospec=True)
    @mock.patch.object(fake, 'FakeDeploy', autospec=True)
    @mock.patch.object(cinder.CinderStorage, 'should_write_image',
                       autospec=True)
    def test_populate_storage_driver_internal_info_iscsi(self,
                                                         mock_should_write,
                                                         mock_deploy,
                                                         mock_boot):
        """iSCSI boot volume info lands in driver_internal_info."""
        mock_deploy.return_value = mock.Mock(
            capabilities=['iscsi_volume_deploy'])
        mock_boot.return_value = mock.Mock(
            capabilities=['iscsi_volume_boot'])
        mock_should_write.return_value = True
        vol_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_volume_target(
            self.context, node_id=self.node.id, volume_type='iscsi',
            boot_index=0, volume_id='1234', uuid=vol_uuid)
        # NOTE(TheJulia): Since the default for the storage_interface
        # is a noop interface, we need to define another driver that
        # can be loaded by driver_manager in order to create the task
        # to test this method.
        self.node.storage_interface = "cinder"
        self.node.save()
        with task_manager.acquire(
                self.context, self.node.uuid, shared=False) as task:
            driver_utils.add_node_capability(task,
                                             'iscsi_boot',
                                             'True')
            utils.populate_storage_driver_internal_info(task)
            self.assertEqual(
                vol_uuid,
                task.node.driver_internal_info.get('boot_from_volume', None))
            self.assertEqual(
                vol_uuid,
                task.node.driver_internal_info.get('boot_from_volume_deploy',
                                                   None))

    @mock.patch.object(fake, 'FakeBoot', autospec=True)
    @mock.patch.object(fake, 'FakeDeploy', autospec=True)
    @mock.patch.object(cinder.CinderStorage, 'should_write_image',
                       autospec=True)
    def test_populate_storage_driver_internal_info_fc(self,
                                                      mock_should_write,
                                                      mock_deploy,
                                                      mock_boot):
        """Fibre Channel boot volume info lands in driver_internal_info."""
        mock_deploy.return_value = mock.Mock(
            capabilities=['fibre_channel_volume_deploy'])
        mock_boot.return_value = mock.Mock(
            capabilities=['fibre_channel_volume_boot'])
        mock_should_write.return_value = True
        self.node.storage_interface = "cinder"
        self.node.save()

        vol_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_volume_target(
            self.context, node_id=self.node.id, volume_type='fibre_channel',
            boot_index=0, volume_id='1234', uuid=vol_uuid)
        with task_manager.acquire(
                self.context, self.node.uuid, shared=False) as task:
            driver_utils.add_node_capability(task,
                                             'fibre_channel_boot',
                                             'True')
            utils.populate_storage_driver_internal_info(task)
            self.assertEqual(
                vol_uuid,
                task.node.driver_internal_info.get('boot_from_volume', None))
            self.assertEqual(
                vol_uuid,
                task.node.driver_internal_info.get('boot_from_volume_deploy',
                                                   None))

    @mock.patch.object(fake, 'FakeBoot', autospec=True)
    @mock.patch.object(fake, 'FakeDeploy', autospec=True)
    def test_populate_storage_driver_internal_info_error(
            self, mock_deploy, mock_boot):
        """StorageError is raised when the node lacks iscsi_boot support."""
        mock_deploy.return_value = mock.Mock(capabilities=['fc_volume_deploy'])
        mock_boot.return_value = mock.Mock(capabilities=['fc_volume_boot'])
        obj_utils.create_test_volume_target(
            self.context, node_id=self.node.id, volume_type='iscsi',
            boot_index=0, volume_id='1234')

        with task_manager.acquire(
                self.context, self.node.uuid, shared=False) as task:
            self.assertRaises(exception.StorageError,
                              utils.populate_storage_driver_internal_info,
                              task)

    def test_tear_down_storage_configuration(self):
        """Tear down destroys targets and clears driver_internal_info."""
        vol_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_volume_target(
            self.context, node_id=self.node.id, volume_type='iscsi',
            boot_index=0, volume_id='1234', uuid=vol_uuid)
        d_i_info = self.node.driver_internal_info
        d_i_info['boot_from_volume'] = vol_uuid
        d_i_info['boot_from_volume_deploy'] = vol_uuid
        self.node.driver_internal_info = d_i_info
        self.node.save()

        with task_manager.acquire(
                self.context, self.node.uuid, shared=False) as task:
            node = task.node

            self.assertEqual(1, len(task.volume_targets))
            self.assertEqual(
                vol_uuid,
                node.driver_internal_info.get('boot_from_volume'))
            self.assertEqual(
                vol_uuid,
                node.driver_internal_info.get('boot_from_volume_deploy'))

            utils.tear_down_storage_configuration(task)

            node.refresh()
            self.assertIsNone(
                node.driver_internal_info.get('boot_from_volume'))
            self.assertIsNone(
                node.driver_internal_info.get('boot_from_volume_deploy'))

        # Verify no volumes exist for new task instances.
        with task_manager.acquire(
                self.context, self.node.uuid, shared=False) as task:
            self.assertEqual(0, len(task.volume_targets))

View File

@ -37,6 +37,7 @@ from ironic.drivers.modules import agent_client
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import pxe
from ironic.drivers.modules.storage import noop as noop_storage
from ironic.drivers import utils as driver_utils
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
@ -533,11 +534,16 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
super(ISCSIDeployTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_pxe")
self.driver = driver_factory.get_driver("fake_pxe")
# NOTE(TheJulia): We explicitly set the noop storage interface as the
# default below for deployment tests in order to raise any change
# in the default which could be a breaking behavior change
# as the storage interface is explicitly an "opt-in" interface.
self.node = obj_utils.create_test_node(
self.context, driver='fake_pxe',
instance_info=INST_INFO_DICT,
driver_info=DRV_INFO_DICT,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
storage_interface='noop',
)
self.node.driver_internal_info['agent_url'] = 'http://1.2.3.4:1234'
dhcp_factory.DHCPFactory._dhcp_provider = None
@ -562,11 +568,17 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
validate_capabilities_mock.assert_called_once_with(task.node)
validate_mock.assert_called_once_with(task)
@mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
autospec=True)
@mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info',
autospec=True)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
'add_provisioning_network', spec_set=True, autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
def test_prepare_node_active(self, prepare_instance_mock,
add_provisioning_net_mock):
add_provisioning_net_mock,
storage_driver_info_mock,
storage_attach_volumes_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.provision_state = states.ACTIVE
@ -575,6 +587,8 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
prepare_instance_mock.assert_called_once_with(
task.driver.boot, task)
self.assertEqual(0, add_provisioning_net_mock.call_count)
storage_driver_info_mock.assert_called_once_with(task)
self.assertFalse(storage_attach_volumes_mock.called)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
'add_provisioning_network', spec_set=True, autospec=True)
@ -590,6 +604,10 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
task.driver.boot, task)
self.assertEqual(0, add_provisioning_net_mock.call_count)
@mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
autospec=True)
@mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info',
autospec=True)
@mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
@ -598,7 +616,8 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
'unconfigure_tenant_networks', spec_set=True, autospec=True)
def test_prepare_node_deploying(
self, unconfigure_tenant_net_mock, add_provisioning_net_mock,
mock_prepare_ramdisk, mock_agent_options):
mock_prepare_ramdisk, mock_agent_options,
storage_driver_info_mock, storage_attach_volumes_mock):
mock_agent_options.return_value = {'c': 'd'}
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.provision_state = states.DEPLOYING
@ -610,6 +629,9 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
task.driver.boot, task, {'c': 'd'})
add_provisioning_net_mock.assert_called_once_with(mock.ANY, task)
unconfigure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
storage_driver_info_mock.assert_called_once_with(task)
storage_attach_volumes_mock.assert_called_once_with(
task.driver.storage, task)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(iscsi_deploy, 'check_image_size', autospec=True)
@ -625,11 +647,16 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
mock_check_image_size.assert_called_once_with(task)
mock_node_power_action.assert_called_once_with(task, states.REBOOT)
@mock.patch.object(noop_storage.NoopStorage, 'detach_volumes',
autospec=True)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
'unconfigure_tenant_networks', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
def test_tear_down(self, node_power_action_mock,
unconfigure_tenant_nets_mock):
unconfigure_tenant_nets_mock,
storage_detach_volumes_mock):
obj_utils.create_test_volume_target(
self.context, node_id=self.node.id)
with task_manager.acquire(self.context,
self.node.uuid, shared=False) as task:
state = task.driver.deploy.tear_down(task)
@ -638,6 +665,12 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
states.POWER_OFF)
unconfigure_tenant_nets_mock.assert_called_once_with(mock.ANY,
task)
storage_detach_volumes_mock.assert_called_once_with(
task.driver.storage, task)
# Verify no volumes exist for new task instances.
with task_manager.acquire(self.context,
self.node.uuid, shared=False) as task:
self.assertEqual(0, len(task.volume_targets))
@mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider')
@mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp')