Adding new screen formatter

This work relates to the ongoing effort to remove the config file (see
spec). It adds a new formatter plugin, "screen", which produces the
VT100 colored output report dumped to a terminal. Previously this was
handled by detection logic inside the txt formatter; the txt formatter
now always emits plain text output.

Logic relating to specific formats has also been moved out of the
manager class. Formatters are plugins and as such should be entirely
opaque to the manager.

Change-Id: Ifc76eace1f84e8808480a352f403eff757641e8f
Timothy Kelsey 2015-11-27 11:44:01 +00:00
parent d96a5c3bbd
commit b6e0bcaa3a
18 changed files with 526 additions and 403 deletions
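For orientation, the contract every formatter plugin satisfies after this change looks roughly like the sketch below. It is assembled from the signatures visible in the diffs that follow (report(), manager.get_issue_list(), utils.output_file()); the module name and the one-line-per-issue body are illustrative only, not part of this commit.

# bandit/formatters/example.py -- illustrative sketch, not in this commit
from bandit.core import utils

def report(manager, filename, sev_level, conf_level, lines=-1):
    '''Write the issues held by ``manager`` to ``filename`` or stdout.'''
    issues = manager.get_issue_list(sev_level=sev_level,
                                    conf_level=conf_level)
    text = '\n'.join('%s:%s: [%s] %s' % (i.fname, i.lineno, i.test, i.text)
                     for i in issues)
    # utils.output_file yields stdout when filename is None
    with utils.output_file(filename, 'w') as fout:
        fout.write(text)

A plugin is made visible to bandit by an entry point in the bandit.formatters group of setup.cfg, exactly as the new screen formatter is registered further down (screen = bandit.formatters.screen:report).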

View File

@ -161,7 +161,7 @@ def main():
)
parser.add_argument(
'-f', '--format', dest='output_format', action='store',
default='txt', help='specify output format',
default='screen', help='specify output format',
choices=sorted(extension_mgr.formatter_names)
)
parser.add_argument(
@ -245,16 +245,6 @@ def main():
logger.info("running on Python %d.%d.%d", sys.version_info.major,
sys.version_info.minor, sys.version_info.micro)
# check ability to write output file, if requested
if args.output_file is not None:
check_dest = b_mgr.check_output_destination(args.output_file)
if check_dest is not True:
logger.error(
'Problem with specified output destination\n\t%s: %s',
check_dest, args.output_file
)
sys.exit(2)
# no point running if there are no tests available
if not b_mgr.has_tests:
logger.error('Could not find any tests to apply, please check '

View File

@ -15,7 +15,6 @@
# under the License.
import logging
import sys
import yaml
@ -96,44 +95,8 @@ class BanditConfig():
:return: -
'''
self._settings = {}
self._init_output_colors()
self._init_plugin_name_pattern()
def _init_output_colors(self):
'''Sets the settings colors
sets settings['color_xxx'] where xxx is DEFAULT, HEADER, LOW, MEDIUM,
HIGH
'''
colors = ['HEADER', 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH']
color_settings = dict()
isatty = hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
for color in colors:
# if not a TTY, overwrite color codes in configuration
if not isatty:
color_settings[color] = ""
# else read color codes in from the config
else:
# grab the default color from constant
color_settings[color] = constants.color[color]
# check if the option has been set in config file
options_string = 'output_colors.' + color
if self.get_option(options_string):
color_string = self.get_option(options_string)
# some manipulation is needed because escape string doesn't
# come back from yaml correctly
if color_string.find('['):
right_half = color_string[color_string.find('['):]
left_half = '\033'
color_settings[color] = left_half + right_half
# update the settings dict with the color value
settings_string = 'color_' + color
self._settings[settings_string] = color_settings[color]
def _init_plugin_name_pattern(self):
'''Sets settings['plugin_name_pattern'] from default or config file.'''
plugin_name_pattern = constants.plugin_name_pattern

View File

@ -14,15 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
# default output text colors
color = {
'DEFAULT': '\033[0m',
'HEADER': '\033[95m',
'LOW': '\033[94m',
'MEDIUM': '\033[93m',
'HIGH': '\033[91m',
}
# default plugin name pattern
plugin_name_pattern = '*.py'

View File

@ -151,30 +151,23 @@ class BanditManager():
:param sev_level: Which severity levels to show (LOW, MEDIUM, HIGH)
:param conf_level: Which confidence levels to show (LOW, MEDIUM, HIGH)
:param output_filename: File to store results
:param output_format: output format, 'csv', 'json', 'txt', 'xml', or
'html'
:param output_format: output format plugin name
:return: -
'''
try:
formatters_mgr = extension_loader.MANAGER.formatters_mgr
try:
formatter = formatters_mgr[output_format]
except KeyError: # Unrecognized format, so use text instead
formatter = formatters_mgr['txt']
output_format = 'txt'
if output_format == 'csv':
lines = 1
elif formatter.name == 'txt' and output_filename:
output_format = 'plain'
if output_format not in formatters_mgr:
output_format = 'screen'
formatter = formatters_mgr[output_format]
report_func = formatter.plugin
report_func(self, filename=output_filename,
sev_level=sev_level, conf_level=conf_level,
lines=lines, out_format=output_format)
lines=lines)
except IOError:
print("Unable to write to file: %s" % output_filename)
except Exception:
raise RuntimeError("Unable to output report using '%s' formatter."
% output_format)
def discover_files(self, targets, recursive=False, excluded_paths=''):
'''Add tests directly and from a directory to the test set
@ -226,21 +219,6 @@ class BanditManager():
self.files_list = sorted(files_list)
self.excluded_files = sorted(excluded_files)
def check_output_destination(self, output_filename):
# case where file already exists
if os.path.isfile(output_filename):
return 'File already exists'
else:
# case where specified destination is a directory
if os.path.isdir(output_filename):
return 'Specified destination is a directory'
# case where specified destination is not writable
try:
open(output_filename, 'w').close()
except IOError:
return 'Specified destination is not writable'
return True
def run_tests(self):
'''Runs through all files in the scope

View File

@ -30,16 +30,17 @@ logger = logging.getLogger(__name__)
@contextlib.contextmanager
def output_file(filename, filemode):
isfileoutput = filename is not None
try:
out = sys.stdout
if isfileoutput:
if filename is not None:
if os.path.isdir(filename):
raise RuntimeError('Specified destination is a directory')
out = open(filename, filemode)
yield out
except Exception:
raise
finally:
if isfileoutput:
if out is not sys.stdout:
out.close()
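The output destination checks that used to live in BanditManager.check_output_destination now live entirely in this context manager: it yields sys.stdout when no filename is given, raises RuntimeError if the destination is a directory, and lets the IOError from an unwritable path propagate. A brief usage sketch (the file name is made up):

from bandit.core import utils

# no filename: the report goes to the terminal
with utils.output_file(None, 'w') as out:
    out.write('written to stdout\n')

# a filename: the file is opened and closed for us; a directory path
# raises RuntimeError, an unwritable path raises IOError from open()
with utils.output_file('bandit_report.txt', 'w') as out:
    out.write('written to the file\n')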

View File

@ -40,8 +40,7 @@ from bandit.core import utils
logger = logging.getLogger(__name__)
def report(manager, filename, sev_level, conf_level, lines=-1,
out_format='csv'):
def report(manager, filename, sev_level, conf_level, lines=-1):
'''Prints issues in CSV format
:param manager: the bandit manager object
@ -49,7 +48,6 @@ def report(manager, filename, sev_level, conf_level, lines=-1,
:param sev_level: Filtering severity level
:param conf_level: Filtering confidence level
:param lines: Number of lines to report, -1 for all
:param out_format: The ouput format name
'''
results = manager.get_issue_list(sev_level=sev_level,

View File

@ -152,8 +152,7 @@ logger = logging.getLogger(__name__)
@accepts_baseline
def report(manager, filename, sev_level, conf_level, lines=-1,
out_format='html'):
def report(manager, filename, sev_level, conf_level, lines=-1):
"""Writes issues to 'filename' in HTML format
:param manager: the bandit manager object
@ -161,7 +160,6 @@ def report(manager, filename, sev_level, conf_level, lines=-1,
:param sev_level: Filtering severity level
:param conf_level: Filtering confidence level
:param lines: Number of lines to report, -1 for all
:param out_format: The output format name
"""
header_block = """

View File

@ -101,8 +101,7 @@ from bandit.core import utils
logger = logging.getLogger(__name__)
def report(manager, filename, sev_level, conf_level, lines=-1,
out_format='json'):
def report(manager, filename, sev_level, conf_level, lines=-1):
'''Prints issues in JSON format
:param manager: the bandit manager object
@ -110,7 +109,6 @@ def report(manager, filename, sev_level, conf_level, lines=-1,
:param sev_level: Filtering severity level
:param conf_level: Filtering confidence level
:param lines: Number of lines to report, -1 for all
:param out_format: The ouput format name
'''
stats = dict(zip(manager.files_list, manager.scores))

bandit/formatters/screen.py (new file, 154 lines)
View File

@ -0,0 +1,154 @@
# Copyright (c) 2015 Hewlett Packard Enterprise
# -*- coding:utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import datetime
import logging
from bandit.core import constants
from bandit.core.test_properties import accepts_baseline
logger = logging.getLogger(__name__)
color = {
'DEFAULT': '\033[0m',
'HEADER': '\033[95m',
'LOW': '\033[94m',
'MEDIUM': '\033[93m',
'HIGH': '\033[91m',
}
def header(text, *args):
return u'%s%s%s' % (color['HEADER'], (text % args), color['DEFAULT'])
def get_verbose_details(manager):
bits = []
bits.append(header(u'Files in scope (%i):', len(manager.files_list)))
tpl = u"\t%s (score: {SEVERITY: %i, CONFIDENCE: %i})"
bits.extend([tpl % (item, sum(score['SEVERITY']), sum(score['CONFIDENCE']))
for (item, score)
in zip(manager.files_list, manager.scores)])
bits.append(header(u'Files excluded (%i):', len(manager.excluded_files)))
bits.extend([u"\t%s" % fname for fname in manager.excluded_files])
return '\n'.join([str(bit) for bit in bits])
def get_metrics(manager):
bits = []
bits.append(header("\nRun metrics:"))
for (criteria, default) in constants.CRITERIA:
bits.append("\tTotal issues (by %s):" % (criteria.lower()))
for rank in constants.RANKING:
bits.append("\t\t%s: %s" % (
rank.capitalize(),
manager.metrics.data['_totals']['%s.%s' % (criteria, rank)]))
return '\n'.join([str(bit) for bit in bits])
def _output_issue_str(issue, indent, show_lineno=True, show_code=True,
lines=-1):
# returns a list of lines that should be added to the existing lines list
bits = []
bits.append("%s%s>> Issue: [%s] %s" % (
indent, color[issue.severity], issue.test, issue.text))
bits.append("%s Severity: %s Confidence: %s" % (
indent, issue.severity.capitalize(), issue.confidence.capitalize()))
bits.append("%s Location: %s:%s%s" % (
indent, issue.fname,
issue.lineno if show_lineno else "",
color['DEFAULT']))
if show_code:
bits.extend([indent + l for l in
issue.get_code(lines, True).split('\n')])
return '\n'.join([str(bit) for bit in bits])
def get_results(manager, sev_level, conf_level, lines):
bits = []
issues = manager.get_issue_list(sev_level, conf_level)
baseline = not isinstance(issues, list)
candidate_indent = ' ' * 10
if not len(issues):
return u"\tNo issues identified."
for issue in issues:
# if not a baseline or only one candidate we know the issue
if not baseline or len(issues[issue]) == 1:
bits.append(_output_issue_str(issue, "", lines=lines))
# otherwise show the finding and the candidates
else:
bits.append(_output_issue_str(issue, "",
show_lineno=False,
show_code=False))
bits.append(u'\n-- Candidate Issues --')
for candidate in issues[issue]:
bits.append(_output_issue_str(candidate,
candidate_indent,
lines=lines))
bits.append('\n')
bits.append(u'-' * 50)
return '\n'.join([str(bit) for bit in bits])
def do_print(bits):
# needed so we can mock this stuff
print('\n'.join([str(bit) for bit in bits]))
@accepts_baseline
def report(manager, filename, sev_level, conf_level, lines=-1):
"""Prints discovered issues formatted for screen reading
This makes use of VT100 terminal codes for colored text.
:param manager: the bandit manager object
:param filename: The output file name, or None for stdout
:param sev_level: Filtering severity level
:param conf_level: Filtering confidence level
:param lines: Number of lines to report, -1 for all
"""
bits = []
bits.append(header("Run started:%s", datetime.datetime.utcnow()))
if manager.verbose:
bits.append(get_verbose_details(manager))
bits.append('\tTotal lines of code: %i' %
(manager.metrics.data['_totals']['loc']))
bits.append('\tTotal lines skipped (#nosec): %i' %
(manager.metrics.data['_totals']['nosec']))
bits.append(get_metrics(manager))
bits.append(header("Files skipped (%i):", len(manager.skipped)))
bits.extend(["\t%s (%s)" % skip for skip in manager.skipped])
bits.append(header("\nTest results:"))
bits.append(get_results(manager, sev_level, conf_level, lines))
do_print(bits)
if filename is not None:
logger.info(("Screen formatter output was not written to file: %s"
", consider '-f txt'") % filename)
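With 'screen' now the default --format, an everyday run prints the colored report straight to the terminal, and plain text written to a file is requested explicitly. Roughly as follows (-f comes from the argparse hunk above; the -r and -o spellings are bandit's usual CLI flags and are not shown in this diff):

bandit -r project/                       # colored VT100 report on the terminal
bandit -r project/ -f txt -o report.txt  # plain text report written to a file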

View File

@ -35,7 +35,8 @@ Sample Output
"""
import collections
from __future__ import print_function
import datetime
import logging
@ -46,168 +47,112 @@ from bandit.core import utils
logger = logging.getLogger(__name__)
@accepts_baseline
def report(manager, filename, sev_level, conf_level, lines=-1,
out_format='txt'):
"""Prints baseline issues in the text format
def get_verbose_details(manager):
bits = []
bits.append(u'Files in scope (%i):' % len(manager.files_list))
tpl = u"\t%s (score: {SEVERITY: %i, CONFIDENCE: %i})"
bits.extend([tpl % (item, sum(score['SEVERITY']), sum(score['CONFIDENCE']))
for (item, score)
in zip(manager.files_list, manager.scores)])
bits.append(u'Files excluded (%i):' % len(manager.excluded_files))
bits.extend([u"\t%s" % fname for fname in manager.excluded_files])
return '\n'.join([str(bit) for bit in bits])
This is identical to normal text output except for each issue
we're going to output the issue we've found and the candidate
issues in the file.
def get_metrics(manager):
bits = []
bits.append("\nRun metrics:")
for (criteria, default) in constants.CRITERIA:
bits.append("\tTotal issues (by %s):" % (criteria.lower()))
for rank in constants.RANKING:
bits.append("\t\t%s: %s" % (
rank.capitalize(),
manager.metrics.data['_totals']['%s.%s' % (criteria, rank)]))
return '\n'.join([str(bit) for bit in bits])
def _output_issue_str(issue, indent, show_lineno=True, show_code=True,
lines=-1):
# returns a list of lines that should be added to the existing lines list
bits = []
bits.append("%s>> Issue: [%s] %s" % (
indent, issue.test, issue.text))
bits.append("%s Severity: %s Confidence: %s" % (
indent, issue.severity.capitalize(), issue.confidence.capitalize()))
bits.append("%s Location: %s:%s" % (
indent, issue.fname, issue.lineno if show_lineno else ""))
if show_code:
bits.extend([indent + l for l in
issue.get_code(lines, True).split('\n')])
return '\n'.join([str(bit) for bit in bits])
def get_results(manager, sev_level, conf_level, lines):
bits = []
issues = manager.get_issue_list(sev_level, conf_level)
baseline = not isinstance(issues, list)
candidate_indent = ' ' * 10
if not len(issues):
return u"\tNo issues identified."
for issue in issues:
# if not a baseline or only one candidate we know the issue
if not baseline or len(issues[issue]) == 1:
bits.append(_output_issue_str(issue, "", lines=lines))
# otherwise show the finding and the candidates
else:
bits.append(_output_issue_str(issue, "",
show_lineno=False,
show_code=False))
bits.append(u'\n-- Candidate Issues --')
for candidate in issues[issue]:
bits.append(_output_issue_str(candidate,
candidate_indent,
lines=lines))
bits.append('\n')
bits.append(u'-' * 50)
return '\n'.join([str(bit) for bit in bits])
@accepts_baseline
def report(manager, filename, sev_level, conf_level, lines=-1):
"""Prints discovered issues in the text format
:param manager: the bandit manager object
:param filename: The output file name, or None for stdout
:param sev_level: Filtering severity level
:param conf_level: Filtering confidence level
:param lines: Number of lines to report, -1 for all
:param out_format: The output format name
"""
tmpstr_list = []
# use a defaultdict to default to an empty string
color = collections.defaultdict(str)
candidate_indent = ' ' * 10
if out_format == 'txt':
# get text colors from settings for TTY output
get_setting = manager.b_conf.get_setting
color = {'HEADER': get_setting('color_HEADER'),
'DEFAULT': get_setting('color_DEFAULT'),
'LOW': get_setting('color_LOW'),
'MEDIUM': get_setting('color_MEDIUM'),
'HIGH': get_setting('color_HIGH')
}
# print header
tmpstr_list.append("%sRun started:%s\n\t%s\n" % (
color['HEADER'],
color['DEFAULT'],
datetime.datetime.utcnow()
))
bits = []
bits.append("Run started:%s" % datetime.datetime.utcnow())
if manager.verbose:
# print which files were inspected
tmpstr_list.append("\n%sFiles in scope (%s):%s\n" % (
color['HEADER'], len(manager.files_list),
color['DEFAULT']
))
for (item, score) in zip(manager.files_list, manager.scores):
score_dict = {'SEVERITY': sum(score['SEVERITY']),
'CONFIDENCE': sum(score['CONFIDENCE'])}
tmpstr_list.append("\t%s (score: %s)\n" % (item, score_dict))
bits.append(get_verbose_details(manager))
# print which files were excluded and why
tmpstr_list.append("\n%sFiles excluded (%s):%s\n" %
(color['HEADER'], len(manager.excluded_files),
color['DEFAULT']))
for fname in manager.excluded_files:
tmpstr_list.append("\t%s\n" % fname)
bits.append('\tTotal lines of code: %i' %
(manager.metrics.data['_totals']['loc']))
# print out basic metrics from run
metrics_summary = ''
for (label, metric) in [
('Total lines of code', 'loc'),
('Total lines skipped (#nosec)', 'nosec')
]:
metrics_summary += "\t%s: %s\n" % (
label, manager.metrics.data['_totals'][metric]
)
for (criteria, default) in constants.CRITERIA:
metrics_summary += "\tTotal issues (by %s):\n" % (
criteria.lower()
)
for rank in constants.RANKING:
metrics_summary += "\t\t%s: %s\n" % (
rank.capitalize(),
manager.metrics.data['_totals']['%s.%s' % (criteria, rank)]
)
tmpstr_list.append("\n%sRun metrics:%s\n%s" % (
color['HEADER'],
color['DEFAULT'],
metrics_summary
))
bits.append('\tTotal lines skipped (#nosec): %i' %
(manager.metrics.data['_totals']['nosec']))
# print which files were skipped and why
tmpstr_list.append("\n%sFiles skipped (%s):%s\n" % (
color['HEADER'], len(manager.skipped),
color['DEFAULT']
))
for (fname, reason) in manager.skipped:
tmpstr_list.append("\t%s (%s)\n" % (fname, reason))
# print the results
tmpstr_list.append("\n%sTest results:%s\n" % (
color['HEADER'], color['DEFAULT']
))
issues = manager.get_issue_list(sev_level=sev_level, conf_level=conf_level)
if not len(issues):
tmpstr_list.append("\tNo issues identified.\n")
baseline = not isinstance(issues, list)
for issue in issues:
# if not a baseline or only one candidate we know the issue
if not baseline or len(issues[issue]) == 1:
tmpstr_list += _output_issue_str(issue, color, "",
lines=lines)
# otherwise show the finding and the candidates
else:
tmpstr_list += _output_issue_str(issue, color, "",
show_lineno=False,
show_code=False)
tmpstr_list.append('\n-- Candidate Issues --\n')
for candidate in issues[issue]:
tmpstr_list += _output_issue_str(candidate, color,
candidate_indent,
lines=lines)
tmpstr_list.append('\n')
tmpstr_list.append(str('-' * 50 + '\n'))
result = ''.join(tmpstr_list)
bits.append(get_metrics(manager))
bits.append("Files skipped (%i):" % len(manager.skipped))
bits.extend(["\t%s (%s)" % skip for skip in manager.skipped])
bits.append("\nTest results:")
bits.append(get_results(manager, sev_level, conf_level, lines))
result = '\n'.join([str(bit) for bit in bits])
with utils.output_file(filename, 'w') as fout:
fout.write(result)
if filename is not None:
logger.info("Text output written to file: %s", filename)
def _output_issue_str(issue, color, indent, show_lineno=True, show_code=True,
lines=-1):
# returns a list of lines that should be added to the existing lines list
tmpstr_list = list()
tmpstr_list.append("\n%s%s>> Issue: [%s] %s\n" % (
indent,
color.get(issue.severity, color['DEFAULT']),
issue.test,
issue.text
))
tmpstr_list.append("%s Severity: %s Confidence: %s\n" % (
indent,
issue.severity.capitalize(),
issue.confidence.capitalize()
))
tmpstr_list.append("%s Location: %s:%s\n" % (
indent,
issue.fname,
issue.lineno if show_lineno else ""
))
tmpstr_list.append(color['DEFAULT'])
if show_code:
tmpstr_list += list(indent + l + '\n' for l in
issue.get_code(lines, True).split('\n'))
return tmpstr_list

View File

@ -43,8 +43,7 @@ from xml.etree import cElementTree as ET
logger = logging.getLogger(__name__)
def report(manager, filename, sev_level, conf_level, lines=-1,
out_format='xml'):
def report(manager, filename, sev_level, conf_level, lines=-1):
'''Prints issues in XML format
:param manager: the bandit manager object
@ -52,7 +51,6 @@ def report(manager, filename, sev_level, conf_level, lines=-1,
:param sev_level: Filtering severity level
:param conf_level: Filtering confidence level
:param lines: Number of lines to report, -1 for all
:param out_format: The ouput format name
'''
issues = manager.get_issue_list(sev_level=sev_level, conf_level=conf_level)

View File

@ -32,6 +32,7 @@ bandit.formatters =
txt = bandit.formatters.text:report
xml = bandit.formatters.xml:report
html = bandit.formatters.html:report
screen = bandit.formatters.screen:report
bandit.plugins =
# bandit/plugins/app_debug.py
flask_debug_true = bandit.plugins.app_debug:flask_debug_true

View File

@ -82,5 +82,5 @@ class RuntimeTests(testtools.TestCase):
self.assertIn("High: 2", output)
self.assertIn("Files skipped (0):", output)
self.assertIn("Issue: [blacklist_imports] Consider possible", output)
self.assertIn("imports.py:2\n", output)
self.assertIn("imports.py:4\n", output)
self.assertIn("imports.py:2", output)
self.assertIn("imports.py:4", output)

View File

@ -54,11 +54,6 @@ class TestInit(testtools.TestCase):
b_config = config.BanditConfig(f.name)
# After initialization, can get settings.
self.assertEqual('', b_config.get_setting('color_HEADER'))
self.assertEqual('', b_config.get_setting('color_DEFAULT'))
self.assertEqual('', b_config.get_setting('color_LOW'))
self.assertEqual('', b_config.get_setting('color_MEDIUM'))
self.assertEqual('', b_config.get_setting('color_HIGH'))
self.assertEqual('*.py', b_config.get_setting('plugin_name_pattern'))
self.assertEqual({example_key: example_value}, b_config.config)
@ -81,38 +76,6 @@ class TestInit(testtools.TestCase):
self.assertRaisesRegex(
utils.ConfigFileInvalidYaml, f.name, config.BanditConfig, f.name)
def test_colors_isatty_defaults(self):
# When stdout says it's a tty there are default colors.
f = self.useFixture(TempFile())
self.useFixture(
fixtures.MockPatch('sys.stdout.isatty', return_value=True))
b_config = config.BanditConfig(f.name)
self.assertEqual('\x1b[95m', b_config.get_setting('color_HEADER'))
self.assertEqual('\x1b[0m', b_config.get_setting('color_DEFAULT'))
self.assertEqual('\x1b[94m', b_config.get_setting('color_LOW'))
self.assertEqual('\x1b[93m', b_config.get_setting('color_MEDIUM'))
self.assertEqual('\x1b[91m', b_config.get_setting('color_HIGH'))
def test_colors_isatty_config(self):
# When stdout says it's a tty the colors can be set in bandit.yaml
self.useFixture(
fixtures.MockPatch('sys.stdout.isatty', return_value=True))
sample_yaml = """
output_colors:
HEADER: '\\033[23m'
"""
f = self.useFixture(TempFile(sample_yaml))
b_config = config.BanditConfig(f.name)
self.assertEqual('\x1b[23m', b_config.get_setting('color_HEADER'))
class TestGetOption(testtools.TestCase):
def setUp(self):

View File

@ -27,12 +27,6 @@ from bandit.core import issue
from bandit.core import constants
from bandit.core import extension_loader
from sys import version_info
if version_info.major == 2:
import __builtin__ as builtins
else:
import builtins
class TempFile(fixtures.Fixture):
def __init__(self, contents=None):
@ -157,39 +151,6 @@ class ManagerTests(testtools.TestCase):
self.assertEqual([3,2,1], r)
@mock.patch('os.path.isfile')
def test_check_output_destination_exists(self, isfile):
isfile.return_value = True
a = self.manager.check_output_destination('derp')
self.assertEqual(a, 'File already exists')
@mock.patch('os.path.isfile')
@mock.patch('os.path.isdir')
def test_check_output_destination_dir(self, isdir, isfile):
isfile.return_value = False
isdir.return_value = True
a = self.manager.check_output_destination('derp')
self.assertEqual(a, 'Specified destination is a directory')
@mock.patch('os.path.isfile')
@mock.patch('os.path.isdir')
def test_check_output_destination_bad(self, isfile, isdir):
with mock.patch.object(builtins, 'open') as b_open:
isfile.return_value = False
isdir.return_value = False
b_open.side_effect = IOError()
a = self.manager.check_output_destination('derp')
self.assertEqual(a, 'Specified destination is not writable')
@mock.patch('os.path.isfile')
@mock.patch('os.path.isdir')
def test_check_output_destination_bad(self, isfile, isdir):
with mock.patch.object(builtins, 'open'):
isfile.return_value = False
isdir.return_value = False
a = self.manager.check_output_destination('derp')
self.assertEqual(a, True)
@mock.patch('os.path.isdir')
def test_discover_files_recurse_skip(self, isdir):
isdir.return_value = True
@ -232,53 +193,6 @@ class ManagerTests(testtools.TestCase):
self.assertEqual(self.manager.files_list, ['thing'])
self.assertEqual(self.manager.excluded_files, [])
def test_output_results_bad(self):
fmt = mock.MagicMock()
with mock.patch('bandit.core.extension_loader.MANAGER') as m:
m.formatters_mgr = {'test': fmt}
self.assertRaises(KeyError, self.manager.output_results,
3, constants.LOW, constants.LOW, None, "txt")
def test_output_results_txt(self):
fmt = mock.MagicMock()
with mock.patch('bandit.core.extension_loader.MANAGER') as m:
m.formatters_mgr = {'txt': fmt}
self.manager.output_results(3, constants.LOW, constants.LOW,
None, "test")
fmt.plugin.assert_called_with(self.manager, conf_level='LOW',
filename=None, lines=3,
out_format='txt', sev_level='LOW')
def test_output_results_csv(self):
fmt = mock.MagicMock()
with mock.patch('bandit.core.extension_loader.MANAGER') as m:
m.formatters_mgr = {'csv': fmt}
self.manager.output_results(3, constants.LOW, constants.LOW,
None, "csv")
fmt.plugin.assert_called_with(self.manager, conf_level='LOW',
filename=None, lines=1,
out_format='csv', sev_level='LOW')
def test_output_results_txt_plain(self):
fmt = mock.MagicMock()
fmt.name = 'txt'
with mock.patch('bandit.core.extension_loader.MANAGER') as m:
m.formatters_mgr = {'txt': fmt}
self.manager.output_results(3, constants.LOW, constants.LOW,
"dummy", "test")
fmt.plugin.assert_called_with(self.manager, conf_level='LOW',
filename="dummy", lines=3,
out_format='plain', sev_level='LOW')
def test_output_results_io_error(self):
fmt = mock.MagicMock()
fmt.name = 'txt'
fmt.plugin.side_effect = IOError
with mock.patch('bandit.core.extension_loader.MANAGER') as m:
m.formatters_mgr = {'txt': fmt}
self.manager.output_results(3, constants.LOW, constants.LOW,
"dummy", "test")
def test_compare_baseline(self):
issue_a = self._get_issue_instance()
issue_a.fname = 'file1.py'

View File

@ -16,6 +16,7 @@
# under the License.
import ast
import mock
import os
import shutil
import sys
@ -27,6 +28,12 @@ import six
from bandit.core import utils as b_utils
from sys import version_info
if version_info.major == 2:
import __builtin__ as builtins
else:
import builtins
def _touch(path):
'''Create an empty file at ``path``.'''
@ -272,3 +279,24 @@ class UtilTests(testtools.TestCase):
self.assertEqual('deep value', b_utils.deepgetattr(a, 'b.c.d'))
self.assertEqual('deep value 2', b_utils.deepgetattr(a, 'b.c.d2'))
self.assertRaises(AttributeError, b_utils.deepgetattr, a.b, 'z')
@mock.patch('os.path.isdir')
def test_check_output_destination_dir(self, isdir):
isdir.return_value = True
def _b_tester(a, b):
with b_utils.output_file(a, b):
pass
self.assertRaises(RuntimeError, _b_tester, 'derp', 'r')
@mock.patch('os.path.isdir')
def test_check_output_destination_bad(self, isdir):
with mock.patch.object(builtins, 'open') as b_open:
isdir.return_value = False
b_open.side_effect = IOError()
def _b_tester(a, b):
with b_utils.output_file(a, b):
pass
self.assertRaises(IOError, _b_tester, 'derp', 'r')

View File

@ -0,0 +1,211 @@
# Copyright (c) 2015 VMware, Inc.
# Copyright (c) 2015 Hewlett Packard Enterprise
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import os
import mock
import tempfile
import testtools
import bandit
from bandit.core import config
from bandit.core import manager
from bandit.core import issue
from bandit.formatters import screen
class ScreenFormatterTests(testtools.TestCase):
def setUp(self):
super(ScreenFormatterTests, self).setUp()
@mock.patch('bandit.core.issue.Issue.get_code')
def test_output_issue(self, get_code):
issue = _get_issue_instance()
get_code.return_value = 'DDDDDDD'
indent_val = 'CCCCCCC'
def _template(_issue, _indent_val, _code, _color):
return_val = ["{}{}>> Issue: [{}] {}".
format(_indent_val, _color, _issue.test,
_issue.text),
"{} Severity: {} Confidence: {}".
format(_indent_val, _issue.severity.capitalize(),
_issue.confidence.capitalize()),
"{} Location: {}:{}{}".
format(_indent_val, _issue.fname, _issue.lineno,
screen.color['DEFAULT'])]
if _code:
return_val.append("{}{}".format(_indent_val, _code))
return '\n'.join(return_val)
issue_text = screen._output_issue_str(issue, indent_val)
expected_return = _template(issue, indent_val, 'DDDDDDD',
screen.color['MEDIUM'])
self.assertEqual(expected_return, issue_text)
issue_text = screen._output_issue_str(issue, indent_val,
show_code=False)
expected_return = _template(issue, indent_val, '',
screen.color['MEDIUM'])
self.assertEqual(expected_return, issue_text)
issue.lineno = ''
issue_text = screen._output_issue_str(issue, indent_val,
show_lineno=False)
expected_return = _template(issue, indent_val, 'DDDDDDD',
screen.color['MEDIUM'])
self.assertEqual(expected_return, issue_text)
@mock.patch('bandit.core.manager.BanditManager.get_issue_list')
def test_no_issues(self, get_issue_list):
cfg_file = os.path.join(os.getcwd(), 'bandit/config/bandit.yaml')
conf = config.BanditConfig(cfg_file)
self.manager = manager.BanditManager(conf, 'file')
(tmp_fd, self.tmp_fname) = tempfile.mkstemp()
self.manager.out_file = self.tmp_fname
get_issue_list.return_value = OrderedDict()
with mock.patch('bandit.formatters.screen.do_print') as m:
screen.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
lines=5)
self.assertIn('No issues identified.',
'\n'.join([str(a) for a in m.call_args]))
@mock.patch('bandit.core.manager.BanditManager.get_issue_list')
def test_report_nobaseline(self, get_issue_list):
cfg_file = os.path.join(os.getcwd(), 'bandit/config/bandit.yaml')
conf = config.BanditConfig(cfg_file)
self.manager = manager.BanditManager(conf, 'file')
(tmp_fd, self.tmp_fname) = tempfile.mkstemp()
self.manager.out_file = self.tmp_fname
self.manager.verbose = True
self.manager.files_list = ['binding.py']
self.manager.scores = [{'SEVERITY': [0, 0, 0, 1],
'CONFIDENCE': [0, 0, 0, 1]}]
self.manager.skipped = [('abc.py', 'File is bad')]
self.manager.excluded_files = ['def.py']
issue_a = _get_issue_instance()
issue_b = _get_issue_instance()
get_issue_list.return_value = [issue_a, issue_b]
self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50}
for category in ['SEVERITY', 'CONFIDENCE']:
for level in ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']:
self.manager.metrics.data['_totals']['%s.%s' %
(category, level)] = 1
# Validate that we're outputting the correct issues
indent_val = ' ' * 10
output_str_fn = 'bandit.formatters.screen._output_issue_str'
with mock.patch(output_str_fn) as output_str:
screen.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW, lines=5)
calls = [mock.call(issue_a, '', lines=5),
mock.call(issue_b, '', lines=5)]
output_str.assert_has_calls(calls, any_order=True)
# Validate that we're outputting all of the expected fields and the
# correct values
with mock.patch('bandit.formatters.screen.do_print') as m:
screen.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
lines=5)
data = '\n'.join([str(a) for a in m.call_args[0][0]])
expected = 'Run started'
self.assertIn(expected, data)
expected_items = [
screen.header('Files in scope (1):'),
'\n\tbinding.py (score: {SEVERITY: 1, CONFIDENCE: 1})']
for item in expected_items:
self.assertIn(item, data)
expected = screen.header('Files excluded (1):') + '\n\tdef.py'
self.assertIn(expected, data)
expected = ('Total lines of code: 1000\n\tTotal lines skipped '
'(#nosec): 50')
self.assertIn(expected, data)
expected = ('Total issues (by severity):\n\t\tUndefined: 1\n\t\t'
'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
self.assertIn(expected, data)
expected = ('Total issues (by confidence):\n\t\tUndefined: 1\n\t\t'
'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
self.assertIn(expected, data)
expected = (screen.header('Files skipped (1):') +
'\n\tabc.py (File is bad)')
self.assertIn(expected, data)
@mock.patch('bandit.core.manager.BanditManager.get_issue_list')
def test_report_baseline(self, get_issue_list):
cfg_file = os.path.join(os.getcwd(), 'bandit/config/bandit.yaml')
conf = config.BanditConfig(cfg_file)
self.manager = manager.BanditManager(conf, 'file')
(tmp_fd, self.tmp_fname) = tempfile.mkstemp()
self.manager.out_file = self.tmp_fname
issue_a = _get_issue_instance()
issue_b = _get_issue_instance()
issue_x = _get_issue_instance()
issue_x.fname = 'x'
issue_y = _get_issue_instance()
issue_y.fname = 'y'
issue_z = _get_issue_instance()
issue_z.fname = 'z'
get_issue_list.return_value = OrderedDict([(issue_a, [issue_x]),
(issue_b, [issue_y, issue_z])])
# Validate that we're outputting the correct issues
indent_val = ' ' * 10
output_str_fn = 'bandit.formatters.screen._output_issue_str'
with mock.patch(output_str_fn) as output_str:
screen.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW, lines=5)
calls = [mock.call(issue_a, '', lines=5),
mock.call(issue_b, '', show_code=False, show_lineno=False),
mock.call(issue_y, indent_val, lines=5),
mock.call(issue_z, indent_val, lines=5)]
output_str.assert_has_calls(calls, any_order=True)
def _get_issue_instance(severity=bandit.MEDIUM, confidence=bandit.MEDIUM):
new_issue = issue.Issue(severity, confidence, 'Test issue')
new_issue.fname = 'code.py'
new_issue.test = 'bandit_plugin'
new_issue.lineno = 1
return new_issue

View File

@ -37,38 +37,34 @@ class TextFormatterTests(testtools.TestCase):
def test_output_issue(self, get_code):
issue = _get_issue_instance()
get_code.return_value = 'DDDDDDD'
color = {'MEDIUM': 'AAAAAAA',
'DEFAULT': 'BBBBBBB'}
indent_val = 'CCCCCCC'
def _template(_issue, _color, _indent_val, _code):
return_val = ["\n{}{}>> Issue: [{}] {}\n".
format(_indent_val, _color['MEDIUM'], _issue.test,
def _template(_issue, _indent_val, _code):
return_val = ["{}>> Issue: [{}] {}".
format(_indent_val, _issue.test,
_issue.text),
"{} Severity: {} Confidence: {}\n".
"{} Severity: {} Confidence: {}".
format(_indent_val, _issue.severity.capitalize(),
_issue.confidence.capitalize()),
"{} Location: {}:{}\n".
format(_indent_val, _issue.fname, _issue.lineno),
"{}".format(_color['DEFAULT'])]
"{} Location: {}:{}".
format(_indent_val, _issue.fname, _issue.lineno)]
if _code:
return_val.append("{}{}\n".format(_indent_val, _code))
return return_val
return_val.append("{}{}".format(_indent_val, _code))
return '\n'.join(return_val)
issue_text = b_text._output_issue_str(issue, color, indent_val)
expected_return = _template(issue, color, indent_val, 'DDDDDDD')
issue_text = b_text._output_issue_str(issue, indent_val)
expected_return = _template(issue, indent_val, 'DDDDDDD')
self.assertEqual(expected_return, issue_text)
issue_text = b_text._output_issue_str(issue, color, indent_val,
issue_text = b_text._output_issue_str(issue, indent_val,
show_code=False)
expected_return = _template(issue, color, indent_val, '')
expected_return = _template(issue, indent_val, '')
self.assertEqual(expected_return, issue_text)
issue.lineno = ''
issue_text = b_text._output_issue_str(issue, color, indent_val,
issue_text = b_text._output_issue_str(issue, indent_val,
show_lineno=False)
expected_return = _template(issue, color, indent_val, 'DDDDDDD')
expected_return = _template(issue, indent_val, 'DDDDDDD')
self.assertEqual(expected_return, issue_text)
@ -110,8 +106,6 @@ class TextFormatterTests(testtools.TestCase):
issue_a = _get_issue_instance()
issue_b = _get_issue_instance()
color = {'DEFAULT': '', 'HEADER': '', 'HIGH': '', 'MEDIUM': '', 'LOW': ''}
get_issue_list.return_value = [issue_a, issue_b]
self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50}
@ -126,8 +120,8 @@ class TextFormatterTests(testtools.TestCase):
with mock.patch(output_str_fn) as output_str:
b_text.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW, lines=5)
calls = [mock.call(issue_a, color, '', lines=5),
mock.call(issue_b, color, '', lines=5)]
calls = [mock.call(issue_a, '', lines=5),
mock.call(issue_b, '', lines=5)]
output_str.assert_has_calls(calls, any_order=True)
@ -141,8 +135,8 @@ class TextFormatterTests(testtools.TestCase):
self.assertIn(expected, data)
expected_items = ['Files in scope (1):\n\tbinding.py (score: ',
"'CONFIDENCE': 1",
"'SEVERITY': 1"]
"CONFIDENCE: 1",
"SEVERITY: 1"]
for item in expected_items:
self.assertIn(item, data)
@ -184,8 +178,6 @@ class TextFormatterTests(testtools.TestCase):
issue_z = _get_issue_instance()
issue_z.fname = 'z'
color = {'DEFAULT': '', 'HEADER': '', 'HIGH': '', 'MEDIUM': '', 'LOW': ''}
get_issue_list.return_value = OrderedDict([(issue_a, [issue_x]),
(issue_b, [issue_y, issue_z])])
@ -195,10 +187,10 @@ class TextFormatterTests(testtools.TestCase):
with mock.patch(output_str_fn) as output_str:
b_text.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW, lines=5)
calls = [mock.call(issue_a, color, '', lines=5),
mock.call(issue_b, color, '', show_code=False, show_lineno=False),
mock.call(issue_y, color, indent_val, lines=5),
mock.call(issue_z, color, indent_val, lines=5)]
calls = [mock.call(issue_a, '', lines=5),
mock.call(issue_b, '', show_code=False, show_lineno=False),
mock.call(issue_y, indent_val, lines=5),
mock.call(issue_z, indent_val, lines=5)]
output_str.assert_has_calls(calls, any_order=True)