Merge "Update `cleaning_error_handler`"

This commit is contained in:
Zuul 2020-11-17 12:06:19 +00:00 committed by Gerrit Code Review
commit 172b76f0f9
14 changed files with 125 additions and 90 deletions

View File

@ -87,8 +87,7 @@ def do_node_clean(task, clean_steps=None):
except Exception as e:
msg = (_('Failed to prepare node %(node)s for cleaning: %(e)s')
% {'node': node.uuid, 'e': e})
LOG.exception(msg)
return utils.cleaning_error_handler(task, msg)
return utils.cleaning_error_handler(task, msg, traceback=True)
if prepare_result == states.CLEANWAIT:
# Prepare is asynchronous, the deploy driver will need to
@ -131,7 +130,6 @@ def do_next_clean_step(task, step_index):
# For manual cleaning, the target provision state is MANAGEABLE,
# whereas for automated cleaning, it is AVAILABLE.
manual_clean = node.target_provision_state == states.MANAGEABLE
if step_index is None:
steps = []
else:
@ -184,9 +182,8 @@ def do_next_clean_step(task, step_index):
'%(exc)s') %
{'node': node.uuid, 'exc': e,
'step': node.clean_step})
LOG.exception(msg)
driver_utils.collect_ramdisk_logs(task.node, label='cleaning')
utils.cleaning_error_handler(task, msg)
utils.cleaning_error_handler(task, msg, traceback=True)
return
# Check if the step is done or not. The step should return
@ -205,7 +202,6 @@ def do_next_clean_step(task, step_index):
msg = (_('While executing step %(step)s on node '
'%(node)s, step returned invalid value: %(val)s')
% {'step': step, 'node': node.uuid, 'val': result})
LOG.error(msg)
return utils.cleaning_error_handler(task, msg)
LOG.info('Node %(node)s finished clean step %(step)s',
{'node': node.uuid, 'step': step})
@ -223,8 +219,8 @@ def do_next_clean_step(task, step_index):
msg = (_('Failed to tear down from cleaning for node %(node)s, '
'reason: %(err)s')
% {'node': node.uuid, 'err': e})
LOG.exception(msg)
return utils.cleaning_error_handler(task, msg,
traceback=True,
tear_down_cleaning=False)
LOG.info('Node %s cleaning complete', node.uuid)
@ -244,12 +240,14 @@ def do_node_clean_abort(task, step_name=None):
try:
task.driver.deploy.tear_down_cleaning(task)
except Exception as e:
LOG.exception('Failed to tear down cleaning for node %(node)s '
'after aborting the operation. Error: %(err)s',
{'node': node.uuid, 'err': e})
log_msg = (_('Failed to tear down cleaning for node %(node)s '
'after aborting the operation. Error: %(err)s') %
{'node': node.uuid, 'err': e})
error_msg = _('Failed to tear down cleaning after aborting '
'the operation')
utils.cleaning_error_handler(task, error_msg,
utils.cleaning_error_handler(task, log_msg,
errmsg=error_msg,
traceback=True,
tear_down_cleaning=False,
set_fail_state=False)
return

View File

@ -397,15 +397,32 @@ def cleanup_cleanwait_timeout(task):
"check if the ramdisk responsible for the cleaning is "
"running on the node. Failed on step %(step)s.") %
{'step': task.node.clean_step})
logmsg = ("Cleaning for node %(node)s failed. %(error)s" %
{'node': task.node.uuid, 'error': last_error})
# NOTE(rloo): this is called from the periodic task for cleanwait timeouts,
# via the task manager's process_event(). The node has already been moved
# to CLEANFAIL, so the error handler doesn't need to set the fail state.
cleaning_error_handler(task, msg=last_error, set_fail_state=False)
cleaning_error_handler(task, logmsg, errmsg=last_error,
set_fail_state=False)
def cleaning_error_handler(task, msg, tear_down_cleaning=True,
def cleaning_error_handler(task, logmsg, errmsg=None, traceback=False,
tear_down_cleaning=True,
set_fail_state=True):
"""Put a failed node in CLEANFAIL and maintenance."""
"""Put a failed node in CLEANFAIL and maintenance.
:param task: a TaskManager instance.
:param logmsg: Message to be logged.
:param errmsg: Message for the user. Optional, if not provided `logmsg` is
used.
:param traceback: Whether to log a traceback. Defaults to False.
:param tear_down_cleaning: Whether to clean up the PXE and DHCP files after
cleaning. Defaults to True.
:param set_fail_state: Whether to set the node to a failed state. Defaults to
True.
"""
errmsg = errmsg or logmsg
LOG.error(logmsg, exc_info=traceback)
node = task.node
node.fault = faults.CLEAN_FAILURE
node.maintenance = True
@ -417,9 +434,7 @@ def cleaning_error_handler(task, msg, tear_down_cleaning=True,
msg2 = ('Failed to tear down cleaning on node %(uuid)s, '
'reason: %(err)s' % {'err': e, 'uuid': node.uuid})
LOG.exception(msg2)
msg = _('%s. Also failed to tear down cleaning.') % msg
else:
LOG.error(msg)
errmsg = _('%s. Also failed to tear down cleaning.') % errmsg
if node.provision_state in (
states.CLEANING,
@ -440,10 +455,10 @@ def cleaning_error_handler(task, msg, tear_down_cleaning=True,
# For manual cleaning, the target provision state is MANAGEABLE, whereas
# for automated cleaning, it is AVAILABLE.
manual_clean = node.target_provision_state == states.MANAGEABLE
node.last_error = msg
node.last_error = errmsg
# NOTE(dtantsur): avoid overwriting existing maintenance_reason
if not node.maintenance_reason:
node.maintenance_reason = msg
node.maintenance_reason = errmsg
node.save()
if set_fail_state and node.provision_state != states.CLEANFAIL:

View File

@ -203,12 +203,14 @@ def _post_step_reboot(task, step_type):
'node': task.node.uuid,
'err': e,
'type': step_type})
LOG.error(msg, exc_info=not isinstance(e, exception.IronicException))
traceback = not isinstance(e, exception.IronicException)
# do not set cleaning_reboot if we didn't reboot
if step_type == 'clean':
manager_utils.cleaning_error_handler(task, msg)
manager_utils.cleaning_error_handler(task, msg,
traceback=traceback)
else:
manager_utils.deploying_error_handler(task, msg)
manager_utils.deploying_error_handler(task, msg,
traceback=traceback)
return
# Signify that we've rebooted
@ -387,13 +389,13 @@ def execute_clean_step(task, step):
return execute_step(task, step, 'clean')
def _step_failure_handler(task, msg, step_type):
def _step_failure_handler(task, msg, step_type, traceback=False):
driver_utils.collect_ramdisk_logs(
task.node, label='cleaning' if step_type == 'clean' else None)
if step_type == 'clean':
manager_utils.cleaning_error_handler(task, msg)
manager_utils.cleaning_error_handler(task, msg, traceback=traceback)
else:
manager_utils.deploying_error_handler(task, msg)
manager_utils.deploying_error_handler(task, msg, traceback=traceback)
class HeartbeatMixin(object):
@ -495,10 +497,11 @@ class HeartbeatMixin(object):
node = task.node
if (node.provision_state in (states.CLEANING, states.CLEANWAIT)
and not CONF.conductor.allow_provisioning_in_maintenance):
LOG.error('Aborting cleaning for node %s, as it is in maintenance '
'mode', node.uuid)
log_msg = ('Aborting cleaning for node %s, as it is in '
'maintenance mode' % node.uuid)
last_error = _('Cleaning aborted as node is in maintenance mode')
manager_utils.cleaning_error_handler(task, last_error)
manager_utils.cleaning_error_handler(task, log_msg,
errmsg=last_error)
elif (node.provision_state in (states.DEPLOYING, states.DEPLOYWAIT)
and not CONF.conductor.allow_provisioning_in_maintenance):
LOG.error('Aborting deployment for node %s, as it is in '
@ -589,10 +592,11 @@ class HeartbeatMixin(object):
self.continue_cleaning(task)
except Exception as e:
last_error = _('%(msg)s. Error: %(exc)s') % {'msg': msg, 'exc': e}
LOG.exception('Asynchronous exception for node %(node)s: %(err)s',
{'node': task.node.uuid, 'err': last_error})
log_msg = ('Asynchronous exception for node %(node)s: %(err)s' %
{'node': task.node.uuid, 'err': last_error})
if node.provision_state in (states.CLEANING, states.CLEANWAIT):
manager_utils.cleaning_error_handler(task, last_error)
manager_utils.cleaning_error_handler(task, log_msg,
errmsg=last_error)
def _heartbeat_rescue_wait(self, task):
msg = _('Node failed to perform rescue operation')
@ -1011,14 +1015,14 @@ class AgentDeployMixin(HeartbeatMixin, AgentOobStepsMixin):
msg = (_('Could not continue cleaning on node '
'%(node)s: %(err)s.') %
{'node': node.uuid, 'err': e})
LOG.exception(msg)
return manager_utils.cleaning_error_handler(task, msg)
return manager_utils.cleaning_error_handler(task, msg,
traceback=True)
except exception.InstanceDeployFailure as e:
msg = (_('Could not continue deployment on node '
'%(node)s: %(err)s.') %
{'node': node.uuid, 'err': e})
LOG.exception(msg)
return manager_utils.deploying_error_handler(task, msg)
return manager_utils.deploying_error_handler(task, msg,
traceback=True)
if manual_clean:
# Don't restart manual cleaning if agent reboots to a new
@ -1047,15 +1051,15 @@ class AgentDeployMixin(HeartbeatMixin, AgentOobStepsMixin):
'%(node)s after step %(step)s: %(err)s.') %
{'node': node.uuid, 'err': e,
'step': node.clean_step})
LOG.exception(msg)
return manager_utils.cleaning_error_handler(task, msg)
return manager_utils.cleaning_error_handler(task, msg,
traceback=True)
except exception.InstanceDeployFailure as e:
msg = (_('Could not restart deployment on node '
'%(node)s after step %(step)s: %(err)s.') %
{'node': node.uuid, 'err': e,
'step': node.deploy_step})
LOG.exception(msg)
return manager_utils.deploying_error_handler(task, msg)
return manager_utils.deploying_error_handler(task, msg,
traceback=True)
manager_utils.notify_conductor_resume_operation(task, step_type)
@ -1109,7 +1113,6 @@ class AgentDeployMixin(HeartbeatMixin, AgentOobStepsMixin):
'err': agent_client.get_command_error(command),
'step': current_step,
'type': step_type})
LOG.error(msg)
return _step_failure_handler(task, msg, step_type)
# NOTE(dtantsur): VERSION_MISMATCH is a new alias for
# CLEAN_VERSION_MISMATCH, remove the old one after IPA removes it.
@ -1137,8 +1140,8 @@ class AgentDeployMixin(HeartbeatMixin, AgentOobStepsMixin):
'cls': e.__class__.__name__,
'step': current_step,
'type': step_type})
LOG.exception(msg)
return _step_failure_handler(task, msg, step_type)
return _step_failure_handler(task, msg, step_type,
traceback=True)
if current_step.get('reboot_requested'):
_post_step_reboot(task, step_type)
@ -1155,7 +1158,6 @@ class AgentDeployMixin(HeartbeatMixin, AgentOobStepsMixin):
'err': command.get('command_status'),
'step': current_step,
'type': step_type})
LOG.error(msg)
return _step_failure_handler(task, msg, step_type)
@METRICS.timer('AgentDeployMixin.tear_down_agent')

View File

@ -1495,11 +1495,10 @@ class DracWSManRAID(base.RAIDInterface):
"Message: '%(message)s'.") %
{'config_job_id': config_job.id,
'message': config_job.message})
log_msg = (_("RAID configuration job failed for node %(node)s. "
"%(error)s") %
log_msg = ("RAID configuration job failed for node %(node)s. "
"%(error)s" %
{'node': task.node.uuid, 'error': error_msg})
if task.node.clean_step:
LOG.error(log_msg)
manager_utils.cleaning_error_handler(task, error_msg)
else:
manager_utils.deploying_error_handler(task, log_msg, error_msg)

View File

@ -920,15 +920,16 @@ class Ilo5Management(IloManagement):
LOG.info("No drive found to perform out-of-band sanitize "
"disk erase for node %(node)s", {'node': node.uuid})
except ilo_error.IloError as ilo_exception:
LOG.error("Out-of-band sanitize disk erase job failed for node "
"%(node)s. Message: '%(message)s'.",
{'node': task.node.uuid, 'message': ilo_exception})
log_msg = ("Out-of-band sanitize disk erase job failed for node "
"%(node)s. Message: '%(message)s'." %
{'node': task.node.uuid, 'message': ilo_exception})
self._pop_driver_internal_values(task,
'ilo_disk_erase_hdd_check',
'ilo_disk_erase_ssd_check',
'cleaning_reboot',
'skip_current_clean_step')
manager_utils.cleaning_error_handler(task, ilo_exception)
manager_utils.cleaning_error_handler(task, log_msg,
errmsg=ilo_exception)
@base.clean_step(priority=0, abortable=False)
def one_button_secure_erase(self, task):
@ -961,7 +962,8 @@ class Ilo5Management(IloManagement):
node.save()
return states.CLEANWAIT
except ilo_error.IloError as ilo_exception:
LOG.error("One button secure erase job failed for node "
"%(node)s. Message: '%(message)s'.",
{'node': task.node.uuid, 'message': ilo_exception})
manager_utils.cleaning_error_handler(task, ilo_exception)
log_msg = ("One button secure erase job failed for node "
"%(node)s. Message: '%(message)s'." %
{'node': task.node.uuid, 'message': ilo_exception})
manager_utils.cleaning_error_handler(task, log_msg,
errmsg=ilo_exception)

View File

@ -69,13 +69,13 @@ class Ilo5RAID(base.RAIDInterface):
return ilo_common.REQUIRED_PROPERTIES
def _set_step_failed(self, task, msg, exc):
LOG.error("RAID configuration job failed for node %(node)s. "
"Message: '%(message)s'.",
{'node': task.node.uuid, 'message': msg})
log_msg = ("RAID configuration job failed for node %(node)s. "
"Message: '%(message)s'." %
{'node': task.node.uuid, 'message': msg})
if task.node.provision_state == states.DEPLOYING:
manager_utils.deploying_error_handler(task, msg)
manager_utils.deploying_error_handler(task, log_msg, errmsg=msg)
else:
manager_utils.cleaning_error_handler(task, msg)
manager_utils.cleaning_error_handler(task, log_msg, errmsg=msg)
def _set_driver_internal_true_value(self, task, *keys):
driver_internal_info = task.node.driver_internal_info

View File

@ -333,7 +333,6 @@ class RedfishBIOS(base.BIOSInterface):
'Attributes %(attrs)s are not updated.') %
{'attrs': attrs_not_updated})
if task.node.provision_state in [states.CLEANING, states.CLEANWAIT]:
LOG.error(error_msg)
manager_utils.cleaning_error_handler(task, last_error)
if task.node.provision_state in [states.DEPLOYING, states.DEPLOYWAIT]:
manager_utils.deploying_error_handler(task, error_msg, last_error)

View File

@ -995,7 +995,6 @@ class RedfishManagement(base.ManagementInterface):
{'node': node.uuid,
'firmware_image': current_update['url'],
'errors': ", ".join(messages)})
LOG.error(error_msg)
task.upgrade_lock()
self._clear_firmware_updates(node)

View File

@ -870,7 +870,7 @@ class DoNodeCleanTestCase(db_base.DbTestCase):
mock_execute.assert_called_once_with(
mock.ANY, mock.ANY, self.clean_steps[0])
@mock.patch.object(cleaning, 'LOG', autospec=True)
@mock.patch.object(conductor_utils, 'LOG', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakePower.execute_clean_step',
@ -918,9 +918,9 @@ class DoNodeCleanTestCase(db_base.DbTestCase):
mock.call(mock.ANY, mock.ANY, self.clean_steps[1]),
]
self.assertEqual(power_exec_calls, power_exec_mock.call_args_list)
log_mock.exception.assert_called_once_with(
log_mock.error.assert_called_once_with(
'Failed to tear down from cleaning for node {}, reason: boom'
.format(node.uuid))
.format(node.uuid), exc_info=True)
def test__do_next_clean_step_automated_fail_in_tear_down_cleaning(self):
self._do_next_clean_step_fail_in_tear_down_cleaning()

View File

@ -1066,14 +1066,19 @@ class ErrorHandlersTestCase(tests_base.TestCase):
@mock.patch.object(conductor_utils, 'cleaning_error_handler',
autospec=True)
def test_cleanup_cleanwait_timeout_handler_call(self, mock_error_handler):
self.task.node.uuid = '18c95393-b775-4887-a274-c45be47509d5'
self.node.clean_step = {}
conductor_utils.cleanup_cleanwait_timeout(self.task)
mock_error_handler.assert_called_once_with(
self.task,
msg="Timeout reached while cleaning the node. Please "
"check if the ramdisk responsible for the cleaning is "
"running on the node. Failed on step {}.",
logmsg="Cleaning for node 18c95393-b775-4887-a274-c45be47509d5 "
"failed. Timeout reached while cleaning the node. Please "
"check if the ramdisk responsible for the cleaning is "
"running on the node. Failed on step {}.",
errmsg="Timeout reached while cleaning the node. Please "
"check if the ramdisk responsible for the cleaning is "
"running on the node. Failed on step {}.",
set_fail_state=False)
def test_cleanup_cleanwait_timeout(self):
@ -1096,7 +1101,9 @@ class ErrorHandlersTestCase(tests_base.TestCase):
self.assertEqual(clean_error, self.node.maintenance_reason)
self.assertEqual('clean failure', self.node.fault)
def _test_cleaning_error_handler(self, prov_state=states.CLEANING):
@mock.patch.object(conductor_utils.LOG, 'error', autospec=True)
def _test_cleaning_error_handler(self, mock_log_error,
prov_state=states.CLEANING):
self.node.provision_state = prov_state
target = 'baz'
self.node.target_provision_state = target
@ -1108,7 +1115,9 @@ class ErrorHandlersTestCase(tests_base.TestCase):
'clean_step_index': 0,
'agent_url': 'url'}
msg = 'error bar'
conductor_utils.cleaning_error_handler(self.task, msg)
last_error = "last error"
conductor_utils.cleaning_error_handler(self.task, msg,
errmsg=last_error)
self.node.save.assert_called_once_with()
self.assertEqual({}, self.node.clean_step)
self.assertNotIn('clean_step_index', self.node.driver_internal_info)
@ -1116,9 +1125,9 @@ class ErrorHandlersTestCase(tests_base.TestCase):
self.assertNotIn('cleaning_polling', self.node.driver_internal_info)
self.assertNotIn('skip_current_clean_step',
self.node.driver_internal_info)
self.assertEqual(msg, self.node.last_error)
self.assertEqual(last_error, self.node.last_error)
self.assertTrue(self.node.maintenance)
self.assertEqual(msg, self.node.maintenance_reason)
self.assertEqual(last_error, self.node.maintenance_reason)
self.assertEqual('clean failure', self.node.fault)
driver = self.task.driver.deploy
driver.tear_down_cleaning.assert_called_once_with(self.task)
@ -1128,6 +1137,7 @@ class ErrorHandlersTestCase(tests_base.TestCase):
self.task.process_event.assert_called_once_with('fail',
target_state=None)
self.assertNotIn('agent_url', self.node.driver_internal_info)
mock_log_error.assert_called_once_with(msg, exc_info=False)
def test_cleaning_error_handler(self):
self._test_cleaning_error_handler()
@ -1172,6 +1182,7 @@ class ErrorHandlersTestCase(tests_base.TestCase):
msg = 'foo'
driver.tear_down_cleaning.side_effect = _side_effect
conductor_utils.cleaning_error_handler(self.task, msg)
log_mock.error.assert_called_once_with(msg, exc_info=False)
self.assertTrue(log_mock.exception.called)
self.assertIn(msg, self.node.last_error)
self.assertIn(msg, self.node.maintenance_reason)

View File

@ -1669,12 +1669,11 @@ class Ilo5ManagementTestCase(db_base.DbTestCase):
task.driver.management.erase_devices,
task, erase_pattern={'ssd': 'xyz'})
@mock.patch.object(ilo_management.LOG, 'error', autospec=True)
@mock.patch.object(ilo_common, 'get_ilo_object', autospec=True)
@mock.patch.object(manager_utils, 'cleaning_error_handler',
autospec=True)
def test_erase_devices_hdd_ilo_error(self, clean_err_handler_mock,
ilo_mock, log_mock):
ilo_mock):
ilo_mock_object = ilo_mock.return_value
ilo_mock_object.get_available_disk_types.return_value = ['HDD']
exc = ilo_error.IloError('error')
@ -1692,8 +1691,11 @@ class Ilo5ManagementTestCase(db_base.DbTestCase):
task.node.driver_internal_info)
self.assertNotIn('skip_current_clean_step',
task.node.driver_internal_info)
self.assertTrue(log_mock.called)
clean_err_handler_mock.assert_called_once_with(task, exc)
clean_err_handler_mock.assert_called_once_with(
task,
("Out-of-band sanitize disk erase job failed for node %s. "
"Message: 'error'." % task.node.uuid),
errmsg=exc)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(ilo_common, 'get_ilo_object', autospec=True)
@ -1711,12 +1713,11 @@ class Ilo5ManagementTestCase(db_base.DbTestCase):
mock_power.assert_called_once_with(task, states.REBOOT)
self.assertEqual(task.node.maintenance, True)
@mock.patch.object(ilo_management.LOG, 'error', autospec=True)
@mock.patch.object(ilo_common, 'get_ilo_object', autospec=True)
@mock.patch.object(manager_utils, 'cleaning_error_handler',
autospec=True)
def test_one_button_secure_erase_ilo_error(
self, clean_err_handler_mock, ilo_mock, log_mock):
self, clean_err_handler_mock, ilo_mock):
ilo_mock_object = ilo_mock.return_value
self.node.clean_step = {'step': 'one_button_secure_erase',
'interface': 'management'}
@ -1726,7 +1727,10 @@ class Ilo5ManagementTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.one_button_secure_erase(task)
clean_err_handler_mock.assert_called_once_with(task, exc)
clean_err_handler_mock.assert_called_once_with(
task,
("One button secure erase job failed for node %s. "
"Message: 'error'." % task.node.uuid),
errmsg=exc)
self.assertTrue(
ilo_mock_object.do_one_button_secure_erase.called)
self.assertTrue(log_mock.called)

View File

@ -415,6 +415,9 @@ class Ilo5RAIDTestCase(db_base.DbTestCase):
task.node.driver_internal_info)
clean_err_handler_mock.assert_called_once_with(
task,
("RAID configuration job failed for node %s. Message: "
"'Failed to create raid configuration on node %s'." %
(self.node.uuid, self.node.uuid)),
'Failed to create raid configuration '
'on node %s' % self.node.uuid)
else:
@ -422,6 +425,9 @@ class Ilo5RAIDTestCase(db_base.DbTestCase):
task.node.driver_internal_info)
deploy_err_handler_mock.assert_called_once_with(
task,
("RAID configuration job failed for node %s. Message: "
"'Failed to create raid configuration on node %s'." %
(self.node.uuid, self.node.uuid)),
'Failed to create raid configuration '
'on node %s' % self.node.uuid)

View File

@ -1093,11 +1093,9 @@ class RedfishManagementTestCase(db_base.DbTestCase):
self.assertTrue(mock_log.called)
@mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(redfish_mgmt.LOG, 'error', autospec=True)
@mock.patch.object(redfish_utils, 'get_update_service', autospec=True)
def test__check_node_firmware_update_fail(self,
mock_get_update_service,
mock_log,
mock_cleaning_error_handler):
mock_sushy_task = mock.Mock()
mock_sushy_task.task_state = 'exception'
@ -1131,7 +1129,6 @@ class RedfishManagementTestCase(db_base.DbTestCase):
management._check_node_firmware_update(task)
task.upgrade_lock.assert_called_once_with()
self.assertTrue(mock_log.called)
self.assertEqual({'something': 'else'},
task.node.driver_internal_info)
mock_cleaning_error_handler.assert_called_once()

View File

@ -597,7 +597,7 @@ class HeartbeatMixinTest(AgentDeployMixinBaseTest):
self.deploy.heartbeat(task, 'http://127.0.0.1:8080', '1.0.0')
mock_touch.assert_called_once_with(mock.ANY)
mock_handler.assert_called_once_with(task, mock.ANY)
mock_handler.assert_called_once_with(task, mock.ANY, mock.ANY)
for called in before_failed_mocks + [failed_mock]:
self.assertTrue(called.called)
for not_called in after_failed_mocks:
@ -671,7 +671,7 @@ class HeartbeatMixinTest(AgentDeployMixinBaseTest):
self.deploy.heartbeat(task, 'http://127.0.0.1:8080', '1.0.0')
mock_continue.assert_called_once_with(mock.ANY, task)
mock_handler.assert_called_once_with(task, mock.ANY)
mock_handler.assert_called_once_with(task, mock.ANY, mock.ANY)
@mock.patch.object(manager_utils, 'rescuing_error_handler', autospec=True)
@mock.patch.object(agent_base.HeartbeatMixin, '_finalize_rescue',
@ -1778,7 +1778,8 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
shared=False) as task:
agent_base._post_step_reboot(task, 'clean')
mock_reboot.assert_called_once_with(task, states.REBOOT)
mock_handler.assert_called_once_with(task, mock.ANY)
mock_handler.assert_called_once_with(task, mock.ANY,
traceback=True)
self.assertNotIn('cleaning_reboot',
task.node.driver_internal_info)
@ -1795,7 +1796,8 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
shared=False) as task:
agent_base._post_step_reboot(task, 'deploy')
mock_reboot.assert_called_once_with(task, states.REBOOT)
mock_handler.assert_called_once_with(task, mock.ANY)
mock_handler.assert_called_once_with(task, mock.ANY,
traceback=True)
self.assertNotIn('deployment_reboot',
task.node.driver_internal_info)
@ -1913,7 +1915,8 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
get_hook_mock.assert_called_once_with(task.node, 'clean')
hook_mock.assert_called_once_with(task, command_status)
error_handler_mock.assert_called_once_with(task, mock.ANY)
error_handler_mock.assert_called_once_with(task, mock.ANY,
traceback=True)
self.assertFalse(notify_mock.called)
collect_logs_mock.assert_called_once_with(task.node,
label='cleaning')
@ -1993,7 +1996,7 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.deploy.continue_cleaning(task)
error_mock.assert_called_once_with(task, mock.ANY)
error_mock.assert_called_once_with(task, mock.ANY, traceback=False)
collect_logs_mock.assert_called_once_with(task.node,
label='cleaning')
@ -2064,7 +2067,7 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
status_mock.assert_called_once_with(mock.ANY, task.node)
refresh_steps_mock.assert_called_once_with(mock.ANY, task, 'clean')
error_mock.assert_called_once_with(task, mock.ANY)
error_mock.assert_called_once_with(task, mock.ANY, traceback=True)
self.assertFalse(notify_mock.called)
self.assertFalse(steps_mock.called)
@ -2081,7 +2084,7 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.deploy.continue_cleaning(task)
error_mock.assert_called_once_with(task, mock.ANY)
error_mock.assert_called_once_with(task, mock.ANY, traceback=False)
def _test_clean_step_hook(self):
"""Helper method for unit tests related to clean step hooks."""