Fix and overhaul helm test integration

The helm test integration was severely broken; this change fixes it by:

* correctly handles the tiller test call response
* removes unnecessary call to tiller to get release content
* removes unnecessary call to k8s to check for test pod completion
* moves common logic into a test handler
* adds test coverage for the above
* adds logging for test results streamed from tiller

Change-Id: I09062387a1abc2fc3f6960f987c97248d9e1cb69
This commit is contained in:
Sean Eagan 2018-06-19 12:57:42 -05:00
parent 6546139155
commit d91dd8ad70
11 changed files with 398 additions and 131 deletions

View File

@ -21,6 +21,7 @@ from oslo_config import cfg
from armada import api
from armada.common import policy
from armada import const
from armada.handlers.test import test_release_for_success
from armada.handlers.tiller import Tiller
from armada.handlers.manifest import Manifest
from armada.utils.release import release_prefixer
@ -44,7 +45,7 @@ class TestReleasesReleaseNameController(api.BaseResource):
'tiller_port') or CONF.tiller_port,
tiller_namespace=req.get_param(
'tiller_namespace', default=CONF.tiller_namespace))
tiller_resp = tiller.testing_release(release)
success = test_release_for_success(tiller, release)
# TODO(fmontei): Provide more sensible exception(s) here.
except Exception as e:
err_message = 'Failed to test {}: {}'.format(release, e)
@ -52,26 +53,18 @@ class TestReleasesReleaseNameController(api.BaseResource):
return self.return_error(
resp, falcon.HTTP_500, message=err_message)
msg = {
'result': '',
'message': ''
}
if tiller_resp:
test_status = getattr(
tiller_resp.info.status, 'last_test_suite_run', 'FAILED')
if test_status.result[0].status == 'PASSED':
msg['result'] = 'PASSED: {}'.format(release)
msg['message'] = 'MESSAGE: Test Pass'
self.logger.info(msg)
else:
msg['result'] = 'FAILED: {}'.format(release)
msg['message'] = 'MESSAGE: Test Fail'
self.logger.info(msg)
if success:
msg = {
'result': 'PASSED: {}'.format(release),
'message': 'MESSAGE: Test Pass'
}
else:
msg['result'] = 'FAILED: {}'.format(release)
msg['message'] = 'MESSAGE: No test found'
msg = {
'result': 'FAILED: {}'.format(release),
'message': 'MESSAGE: Test Fail'
}
self.logger.info(msg)
resp.body = json.dumps(msg)
resp.status = falcon.HTTP_200
@ -174,15 +167,8 @@ class TestReleasesManifestController(api.BaseResource):
if release_name in known_releases:
self.logger.info('RUNNING: %s tests', release_name)
resp = tiller.testing_release(release_name)
if not resp:
continue
test_status = getattr(
resp.info.status, 'last_test_suite_run',
'FAILED')
if test_status.results[0].status:
success = test_release_for_success(tiller, release_name)
if success:
self.logger.info("PASSED: %s", release_name)
message['test']['passed'].append(release_name)
else:

View File

@ -19,6 +19,7 @@ from oslo_config import cfg
from armada.cli import CliAction
from armada import const
from armada.handlers.test import test_release_for_success
from armada.handlers.manifest import Manifest
from armada.handlers.tiller import Tiller
from armada.utils.release import release_prefixer
@ -112,15 +113,8 @@ class TestChartManifest(CliAction):
if self.release:
if not self.ctx.obj.get('api', False):
self.logger.info("RUNNING: %s tests", self.release)
resp = tiller.testing_release(self.release)
if not resp:
self.logger.info("FAILED: %s", self.release)
return
test_status = getattr(resp.info.status, 'last_test_suite_run',
'FAILED')
if test_status.results[0].status:
success = test_release_for_success(tiller, self.release)
if success:
self.logger.info("PASSED: %s", self.release)
else:
self.logger.info("FAILED: %s", self.release)
@ -154,15 +148,9 @@ class TestChartManifest(CliAction):
if release_name in known_release_names:
self.logger.info('RUNNING: %s tests', release_name)
resp = tiller.testing_release(release_name)
if not resp:
continue
test_status = getattr(
resp.info.status, 'last_test_suite_run',
'FAILED')
if test_status.results[0].status:
success = test_release_for_success(
tiller, release_name)
if success:
self.logger.info("PASSED: %s", release_name)
else:
self.logger.info("FAILED: %s", release_name)

View File

@ -118,6 +118,20 @@ class ReleaseException(TillerException):
super(ReleaseException, self).__init__(message)
class TestFailedException(TillerException):
    '''
    Exception that occurs when a release test fails.

    Raised after a helm test run completes with one or more failed
    (non-success) test results for the release.

    **Troubleshoot:**
    *Coming Soon*
    '''

    def __init__(self, release):
        # Include the release name so callers/logs can identify which
        # release's tests failed.
        message = 'Test failed for release: {}'.format(release)

        super(TestFailedException, self).__init__(message)
class ChannelException(TillerException):
'''
Exception that occurs during a failed gRPC channel creation

View File

@ -27,6 +27,7 @@ from armada.exceptions import validate_exceptions
from armada.handlers.chartbuilder import ChartBuilder
from armada.handlers.manifest import Manifest
from armada.handlers.override import Override
from armada.handlers.test import test_release_for_success
from armada.handlers.tiller import Tiller
from armada.utils.release import release_prefixer
from armada.utils import source
@ -435,21 +436,22 @@ class Armada(object):
# Sequenced ChartGroup should run tests after each Chart
timer = int(round(deadline - time.time()))
if test_this_chart and cg_sequenced:
LOG.info('Running sequenced test, timeout remaining: %ss.',
timer)
if timer <= 0:
reason = ('Timeout expired before testing sequenced '
'release %s' % release_name)
LOG.error(reason)
raise armada_exceptions.ArmadaTimeoutException(reason)
self._test_chart(release_name, timer)
# TODO(MarshM): handle test failure or timeout
if test_this_chart:
if cg_sequenced:
LOG.info('Running sequenced test, timeout remaining: '
'%ss.', timer)
if timer <= 0:
reason = ('Timeout expired before testing '
'sequenced release %s' % release_name)
LOG.error(reason)
raise armada_exceptions.ArmadaTimeoutException(
reason)
self._test_chart(release_name, timer)
# Un-sequenced ChartGroup should run tests at the end
elif test_this_chart:
# Keeping track of time remaining
tests_to_run.append((release_name, timer))
# Un-sequenced ChartGroup should run tests at the end
else:
# Keeping track of time remaining
tests_to_run.append((release_name, timer))
# End of Charts in ChartGroup
LOG.info('All Charts applied in ChartGroup %s.', cg_name)
@ -481,7 +483,6 @@ class Armada(object):
# After entire ChartGroup is healthy, run any pending tests
for (test, test_timer) in tests_to_run:
self._test_chart(test, test_timer)
# TODO(MarshM): handle test failure or timeout
self.post_flight_ops()
@ -532,16 +533,13 @@ class Armada(object):
'release=%s with timeout %ss.', release_name, timeout)
return True
# TODO(MarshM): Fix testing, it's broken, and track timeout
resp = self.tiller.testing_release(release_name, timeout=timeout)
status = getattr(resp.info.status, 'last_test_suite_run', 'FAILED')
LOG.info("Test info.status: %s", status)
if resp:
success = test_release_for_success(
self.tiller, release_name, timeout=timeout)
if success:
LOG.info("Test passed for release: %s", release_name)
return True
else:
LOG.info("Test failed for release: %s", release_name)
return False
raise tiller_exceptions.TestFailedException(release_name)
def show_diff(self, chart, installed_chart, installed_values, target_chart,
target_values, msg):

29
armada/handlers/test.py Normal file
View File

@ -0,0 +1,29 @@
# Copyright 2018 The Armada Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from armada import const
# Status codes streamed by tiller for individual test runs
# (mirrors hapi.release.TestRun.Status).
TESTRUN_STATUS_UNKNOWN = 0
TESTRUN_STATUS_SUCCESS = 1
TESTRUN_STATUS_FAILURE = 2
TESTRUN_STATUS_RUNNING = 3


def test_release_for_success(tiller, release,
                             timeout=const.DEFAULT_TILLER_TIMEOUT):
    """Run helm tests for a release and report overall success.

    :param tiller: Tiller handler used to execute the test run.
    :param release: name of the release to test.
    :param timeout: seconds allowed for the test run.
    :returns: True when every streamed test result has SUCCESS status
        (including the case of no results at all), False otherwise.
    """
    suite_run = tiller.test_release(release, timeout)
    run_results = getattr(suite_run, 'results', [])
    # Any non-success status (failure, running, unknown) counts as
    # an overall failure.
    return all(r.status == TESTRUN_STATUS_SUCCESS for r in run_results)

View File

@ -32,12 +32,12 @@ from oslo_log import log as logging
from armada import const
from armada.exceptions import tiller_exceptions as ex
from armada.handlers.k8s import K8s
from armada.handlers import test
from armada.utils.release import label_selectors
TILLER_VERSION = b'2.7.2'
GRPC_EPSILON = 60
RELEASE_LIMIT = 128 # TODO(mark-burnett): There may be a better page size.
RELEASE_RUNTEST_SUCCESS = 9
# the standard gRPC max message size is 4MB
# this expansion comes at a performance penalty
@ -434,41 +434,43 @@ class Tiller(object):
status = self.get_release_status(release)
raise ex.ReleaseException(release, status, 'Install')
def testing_release(self, release, timeout=const.DEFAULT_TILLER_TIMEOUT,
cleanup=True):
def test_release(self, release, timeout=const.DEFAULT_TILLER_TIMEOUT,
cleanup=True):
'''
:param release - name of release to test
:param timeout - runtime before exiting
:param cleanup - removes testing pod created
:returns - results of test pod
:returns - test suite run object
'''
LOG.info("Running Helm test: release=%s, timeout=%s", release, timeout)
try:
stub = ReleaseServiceStub(self.channel)
# TODO: This timeout is redundant since we already have the grpc
# timeout below, and it's actually used by tiller for individual
# k8s operations not the overall request, should we:
# 1. Remove this timeout
# 2. Add `k8s_timeout=const.DEFAULT_K8S_TIMEOUT` arg and use
release_request = TestReleaseRequest(
name=release, timeout=timeout, cleanup=cleanup)
name=release, timeout=timeout,
cleanup=cleanup)
content = self.get_release_content(release)
test_message_stream = stub.RunReleaseTest(
release_request, timeout, metadata=self.metadata)
if not len(content.release.hooks):
LOG.info('No test found')
return False
failed = 0
for test_message in test_message_stream:
if test_message.status == test.TESTRUN_STATUS_FAILURE:
failed += 1
LOG.info(test_message.msg)
if failed:
LOG.info('{} test(s) failed'.format(failed))
if content.release.hooks[0].events[0] == RELEASE_RUNTEST_SUCCESS:
test = stub.RunReleaseTest(
release_request, timeout, metadata=self.metadata)
if test.running():
self.k8s.wait_get_completed_podphase(release, timeout)
test.cancel()
return self.get_release_status(release)
status = self.get_release_status(release)
return status.info.status.last_test_suite_run
except Exception:
LOG.exception('Error while testing release %s', release)

View File

@ -107,3 +107,12 @@ def attr(**kwargs):
return f
return decorator
class AttrDict(dict):
    """Dict whose keys are also readable/writable as attributes.

    Lets tests fabricate objects with arbitrary attributes (e.g. gRPC
    response stand-ins) without declaring a dedicated class.
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Aliasing __dict__ to the mapping itself makes attribute access
        # and item access share the same storage.
        self.__dict__ = self

View File

@ -55,59 +55,48 @@ class TestReleasesManifestControllerTest(base.BaseControllerTest):
class TestReleasesReleaseNameControllerTest(base.BaseControllerTest):
@mock.patch.object(test, 'test_release_for_success')
@mock.patch.object(test, 'Tiller')
def test_test_controller_test_pass(self, mock_tiller):
def test_test_controller_test_pass(
self, mock_tiller, mock_test_release_for_success):
rules = {'armada:test_release': '@'}
self.policy.set_rules(rules)
testing_release = mock_tiller.return_value.testing_release
testing_release.return_value = mock.Mock(
**{'info.status.last_test_suite_run.result': [
mock.Mock(status='PASSED')]})
mock_test_release_for_success.return_value = True
resp = self.app.simulate_get('/api/v1.0/test/fake-release')
self.assertEqual(200, resp.status_code)
self.assertEqual('MESSAGE: Test Pass',
json.loads(resp.text)['message'])
@mock.patch.object(test, 'test_release_for_success')
@mock.patch.object(test, 'Tiller')
def test_test_controller_test_fail(self, mock_tiller):
def test_test_controller_test_fail(
self, mock_tiller, mock_test_release_for_success):
rules = {'armada:test_release': '@'}
self.policy.set_rules(rules)
testing_release = mock_tiller.return_value.testing_release
testing_release.return_value = mock.Mock(
**{'info.status.last_test_suite_run.result': [
mock.Mock(status='FAILED')]})
mock_test_release_for_success.return_value = False
resp = self.app.simulate_get('/api/v1.0/test/fake-release')
self.assertEqual(200, resp.status_code)
self.assertEqual('MESSAGE: Test Fail',
json.loads(resp.text)['message'])
@mock.patch.object(test, 'Tiller')
def test_test_controller_no_test_found(self, mock_tiller):
rules = {'armada:test_release': '@'}
self.policy.set_rules(rules)
mock_tiller.return_value.testing_release.return_value = None
resp = self.app.simulate_get('/api/v1.0/test/fake-release')
self.assertEqual(200, resp.status_code)
self.assertEqual('MESSAGE: No test found',
json.loads(resp.text)['message'])
@test_utils.attr(type=['negative'])
class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest):
@mock.patch.object(test, 'Manifest')
@mock.patch.object(test, 'Tiller')
def test_test_controller_tiller_exc_returns_500(self, mock_tiller, _):
@mock.patch.object(test, 'test_release_for_success')
def test_test_controller_tiller_exc_returns_500(
self, mock_test_release_for_success, mock_tiller, _):
rules = {'armada:tests_manifest': '@'}
self.policy.set_rules(rules)
mock_tiller.side_effect = Exception
mock_test_release_for_success.side_effect = Exception
resp = self.app.simulate_post('/api/v1.0/tests')
self.assertEqual(500, resp.status_code)
@ -187,11 +176,14 @@ class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest):
class TestReleasesReleaseNameControllerNegativeTest(base.BaseControllerTest):
@mock.patch.object(test, 'Tiller')
def test_test_controller_tiller_exc_returns_500(self, mock_tiller):
@mock.patch.object(test, 'test_release_for_success')
def test_test_controller_tiller_exc_returns_500(
self, mock_test_release_for_success, mock_tiller):
rules = {'armada:test_release': '@'}
self.policy.set_rules(rules)
mock_tiller.side_effect = Exception
mock_test_release_for_success.side_effect = Exception
resp = self.app.simulate_get('/api/v1.0/test/fake-release')
self.assertEqual(500, resp.status_code)

View File

@ -18,7 +18,9 @@ import yaml
from armada import const
from armada.handlers import armada
from armada.tests.unit import base
from armada.tests.test_utils import AttrDict
from armada.utils.release import release_prefixer
from armada.exceptions import tiller_exceptions
from armada.exceptions.armada_exceptions import ProtectedReleaseException
TEST_YAML = """
@ -43,6 +45,27 @@ data:
- example-chart-1
- example-chart-2
- example-chart-3
- example-chart-4
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: example-chart-4
data:
chart_name: test_chart_4
release: test_chart_4
namespace: test
values: {}
source:
type: local
location: /tmp/dummy/armada
subpath: chart_4
dependencies: []
test: true
wait:
timeout: 10
upgrade:
no_hooks: false
---
schema: armada/Chart/v1
metadata:
@ -64,9 +87,6 @@ data:
timeout: 10
upgrade:
no_hooks: false
options:
force: true
recreate_pods: true
---
schema: armada/Chart/v1
metadata:
@ -113,7 +133,8 @@ data:
CHART_SOURCES = [('git://github.com/dummy/armada', 'chart_1'),
('/tmp/dummy/armada', 'chart_2'),
('/tmp/dummy/armada', 'chart_3')]
('/tmp/dummy/armada', 'chart_3'),
('/tmp/dummy/armada', 'chart_4')]
class ArmadaHandlerTestCase(base.ArmadaTestCase):
@ -195,13 +216,31 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
'timeout': 10
},
'upgrade': {
'no_hooks': False,
'options': {
'force': True,
'recreate_pods': True
}
'no_hooks': False
}
}
},
{
'chart': {
'dependencies': [],
'chart_name': 'test_chart_4',
'namespace': 'test',
'release': 'test_chart_4',
'source': {
'location': '/tmp/dummy/armada',
'subpath': 'chart_4',
'type': 'local'
},
'source_dir': CHART_SOURCES[3],
'values': {},
'wait': {
'timeout': 10
},
'upgrade': {
'no_hooks': False
},
'test': True
}
}
],
'description': 'this is a test',
@ -261,26 +300,41 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
mock_source.source_cleanup.assert_called_with(
CHART_SOURCES[counter][0])
def _test_sync(self, known_releases):
def _test_sync(self, known_releases, test_success=True,
test_failure_to_run=False):
"""Test install functionality from the sync() method."""
@mock.patch.object(armada.Armada, 'post_flight_ops')
@mock.patch.object(armada.Armada, 'pre_flight_ops')
@mock.patch('armada.handlers.armada.ChartBuilder')
@mock.patch('armada.handlers.armada.Tiller')
def _do_test(mock_tiller, mock_chartbuilder, mock_pre_flight,
mock_post_flight):
@mock.patch.object(armada, 'test_release_for_success')
def _do_test(mock_test_release_for_success, mock_tiller,
mock_chartbuilder, mock_pre_flight, mock_post_flight):
# Instantiate Armada object.
yaml_documents = list(yaml.safe_load_all(TEST_YAML))
armada_obj = armada.Armada(yaml_documents)
armada_obj.show_diff = mock.Mock()
charts = armada_obj.manifest['armada']['chart_groups'][0][
'chart_group']
chart_group = armada_obj.manifest['armada']['chart_groups'][0]
charts = chart_group['chart_group']
m_tiller = mock_tiller.return_value
m_tiller.list_charts.return_value = known_releases
if test_failure_to_run:
def fail(tiller, release, timeout=None):
status = AttrDict(**{
'info': AttrDict(**{
'Description': 'Failed'
})
})
raise tiller_exceptions.ReleaseException(
release, status, 'Test')
mock_test_release_for_success.side_effect = fail
else:
mock_test_release_for_success.return_value = test_success
# Stub out irrelevant methods called by `armada.sync()`.
mock_chartbuilder.get_source_path.return_value = None
mock_chartbuilder.get_helm_chart.return_value = None
@ -290,6 +344,7 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
expected_install_release_calls = []
expected_update_release_calls = []
expected_uninstall_release_calls = []
expected_test_release_for_success_calls = []
for c in charts:
chart = c['chart']
@ -371,6 +426,18 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
timeout=chart['wait']['timeout']
)
)
test_this_chart = chart.get(
'test',
chart_group.get('test_charts', False))
if test_this_chart:
expected_test_release_for_success_calls.append(
mock.call(
m_tiller,
release_name,
timeout=mock.ANY
)
)
# Verify that at least 1 release is either installed or updated.
self.assertTrue(
@ -388,6 +455,18 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
m_tiller.update_release.call_count)
m_tiller.update_release.assert_has_calls(
expected_update_release_calls)
# Verify that the expected number of deployed releases are
# uninstalled with expected arguments.
self.assertEqual(len(expected_uninstall_release_calls),
m_tiller.uninstall_release.call_count)
m_tiller.uninstall_release.assert_has_calls(
expected_uninstall_release_calls)
# Verify that the expected number of deployed releases are
# tested with expected arguments.
self.assertEqual(len(expected_test_release_for_success_calls),
mock_test_release_for_success.call_count)
mock_test_release_for_success.assert_has_calls(
expected_test_release_for_success_calls)
_do_test()
@ -458,6 +537,22 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
ProtectedReleaseException,
_test_method)
def test_armada_sync_test_failure(self):
def _test_method():
self._test_sync([], test_success=False)
self.assertRaises(
tiller_exceptions.TestFailedException,
_test_method)
def test_armada_sync_test_failure_to_run(self):
def _test_method():
self._test_sync([], test_failure_to_run=True)
self.assertRaises(
tiller_exceptions.ReleaseException,
_test_method)
@mock.patch.object(armada.Armada, 'post_flight_ops')
@mock.patch.object(armada.Armada, 'pre_flight_ops')
@mock.patch('armada.handlers.armada.ChartBuilder')

View File

@ -0,0 +1,80 @@
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from armada.handlers import tiller
from armada.handlers import test
from armada.tests.unit import base
from armada.tests.test_utils import AttrDict
class TestHandlerTestCase(base.ArmadaTestCase):
    """Unit tests for `armada.handlers.test.test_release_for_success`."""

    def _test_test_release_for_success(self, expected_success, results):
        # Helper: stub Tiller.test_release to return a test suite run
        # holding `results`, then assert the handler reports
        # `expected_success`.
        @mock.patch('armada.handlers.tiller.K8s')
        def do_test(_):
            tiller_obj = tiller.Tiller('host', '8080', None)
            release = 'release'
            tiller_obj.test_release = mock.Mock()
            tiller_obj.test_release.return_value = AttrDict(**{
                'results': results
            })

            success = test.test_release_for_success(tiller_obj, release)
            self.assertEqual(expected_success, success)

        do_test()

    def test_no_results(self):
        # No test results at all counts as success.
        self._test_test_release_for_success(True, [])

    def test_unknown(self):
        # An UNKNOWN status is not success, so the run fails overall.
        self._test_test_release_for_success(False, [
            AttrDict(**{
                'status': test.TESTRUN_STATUS_SUCCESS
            }),
            AttrDict(**{
                'status': test.TESTRUN_STATUS_UNKNOWN
            })
        ])

    def test_success(self):
        # All-success results yield overall success.
        self._test_test_release_for_success(True, [
            AttrDict(**{
                'status': test.TESTRUN_STATUS_SUCCESS
            })
        ])

    def test_failure(self):
        # A single FAILURE among the results fails the run.
        self._test_test_release_for_success(False, [
            AttrDict(**{
                'status': test.TESTRUN_STATUS_SUCCESS
            }),
            AttrDict(**{
                'status': test.TESTRUN_STATUS_FAILURE
            })
        ])

    def test_running(self):
        # A still-RUNNING result is not success either.
        self._test_test_release_for_success(False, [
            AttrDict(**{
                'status': test.TESTRUN_STATUS_SUCCESS
            }),
            AttrDict(**{
                'status': test.TESTRUN_STATUS_RUNNING
            })
        ])

View File

@ -17,7 +17,9 @@ from mock import MagicMock
from armada.exceptions import tiller_exceptions as ex
from armada.handlers import tiller
from armada.handlers import test
from armada.tests.unit import base
from armada.tests.test_utils import AttrDict
class TillerTestCase(base.ArmadaTestCase):
@ -421,8 +423,80 @@ class TillerTestCase(base.ArmadaTestCase):
self.assertEqual(expected_result, result)
def _test_test_release(self, grpc_response_mock):
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
@mock.patch('armada.handlers.tiller.K8s')
@mock.patch('armada.handlers.tiller.grpc')
@mock.patch('armada.handlers.tiller.Config')
@mock.patch.object(tiller, 'TestReleaseRequest')
@mock.patch.object(tiller, 'ReleaseServiceStub')
def do_test(self, mock_release_service_stub,
mock_test_release_request, mock_config,
_, __):
tiller_obj = tiller.Tiller('host', '8080', None)
release = 'release'
test_suite_run = {}
mock_release_service_stub.return_value.RunReleaseTest\
.return_value = grpc_response_mock
tiller_obj.get_release_status = mock.Mock()
tiller_obj.get_release_status.return_value = AttrDict(**{
'info': AttrDict(**{
'status': AttrDict(**{
'last_test_suite_run': test_suite_run
}),
'Description': 'Failed'
})
})
result = tiller_obj.test_release(release)
self.assertEqual(test_suite_run, result)
do_test(self)
def test_test_release_no_tests(self):
self._test_test_release([
AttrDict(**{
'msg': 'No Tests Found',
'status': test.TESTRUN_STATUS_UNKNOWN
})
])
def test_test_release_success(self):
self._test_test_release([
AttrDict(**{
'msg': 'RUNNING: ...',
'status': test.TESTRUN_STATUS_RUNNING
}),
AttrDict(**{
'msg': 'SUCCESS: ...',
'status': test.TESTRUN_STATUS_SUCCESS
})
])
def test_test_release_failure(self):
self._test_test_release([
AttrDict(**{
'msg': 'RUNNING: ...',
'status': test.TESTRUN_STATUS_RUNNING
}),
AttrDict(**{
'msg': 'FAILURE: ...',
'status': test.TESTRUN_STATUS_FAILURE
})
])
def test_test_release_failure_to_run(self):
class Iterator:
def __iter__(self):
return self
def __next__(self):
raise Exception
def test():
self._test_test_release(Iterator())
self.assertRaises(ex.ReleaseException, test)