Merge "Enable unit tests for py36"

commit ebc8eb843e
Zuul 2019-07-22 15:03:24 +00:00, committed by Gerrit Code Review
15 changed files with 166 additions and 163 deletions

View File

@@ -322,7 +322,7 @@ class DynamicCheckHelper(object):
self._metric_to_group[iname] = {}
self._groups[iname] = []
if groups:
self._groups[iname] = groups.keys()
self._groups[iname] = list(groups.keys())
self._grp_metric_map[iname] = {}
self._grp_metric_cache[iname] = {}
self._grp_dimension_map[iname] = {}
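Why the list() wrapper: on Python 3, dict.keys() returns a live view object rather than an independent list, so storing it and mutating the dict later changes the stored value (and views are not indexable). A minimal sketch, separate from the monasca-agent code:

```python
# Standalone illustration of the py2 -> py3 dict.keys() change.
groups = {'cpu': 1, 'mem': 2}

keys_view = groups.keys()        # py3: live view; py2: independent list
keys_list = list(groups.keys())  # snapshot, safe to store on both

groups['disk'] = 3
print(sorted(keys_view))  # ['cpu', 'disk', 'mem'] - the view tracks the dict
print(sorted(keys_list))  # ['cpu', 'mem']         - the snapshot does not
```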
@@ -831,7 +831,8 @@ class DynamicCheckHelper(object):
@staticmethod
def _normalize_metricname(metric, match_groups=None):
# map metric name first
if match_groups and match_groups.lastindex > 0:
if match_groups and match_groups.lastindex \
and match_groups.lastindex > 0:
metric = '_'.join(match_groups.groups())
metric = re.sub(
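Why the extra lastindex test: a match object's lastindex is None when the pattern captured no groups, and py3 no longer allows ordering comparisons with None, so `None > 0` raises TypeError instead of returning False as it did on py2. A sketch:

```python
import re

m = re.match(r'metric_(\w+)_(\w+)', 'metric_http_requests')
if m and m.lastindex and m.lastindex > 0:  # guard: lastindex may be None
    print('_'.join(m.groups()))            # http_requests

m2 = re.match(r'plainname', 'plainname')   # pattern has no capturing groups
print(m2.lastindex)                        # None; on py3, None > 0 raises TypeError
```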

View File

@@ -15,6 +15,9 @@ import re
import subprocess
from monasca_agent.collector import checks
from oslo_utils import encodeutils
safe_decode = encodeutils.safe_decode
_CACHE_FLUSH_RATE_REGEX = re.compile(r'(\d+) ([kKmMgG][bB])/s flush')
_CACHE_EVICT_RATE_REGEX = re.compile(r'(\d+) ([kKmMgG][bB])/s evict')
@@ -381,9 +384,8 @@ class Ceph(checks.AgentCheck):
metrics.update({'ceph.cluster.pgs.scrubbing_count': 0,
'ceph.cluster.pgs.deep_scrubbing_count': 0})
for state in ceph_status['pgmap']['pgs_by_state']:
metrics['ceph.cluster.pgs.' +
state['state_name'].encode('ascii', 'ignore')] = state[
'count']
statename = safe_decode(state['state_name'], incoming='utf-8')
metrics['ceph.cluster.pgs.' + statename] = state['count']
if 'scrubbing' in state['state_name']:
if 'deep' in state['state_name']:
metrics['ceph.cluster.pgs.deep_scrubbing_count'] += state[
@@ -420,7 +422,9 @@
'health_services']:
for mon in health_service['mons']:
store_stats = mon['store_stats']
mon_metrics[mon['name'].encode('ascii', 'ignore')] = {
mon['name'] = safe_decode(mon['name'], incoming='utf-8')
mon_metrics[mon['name']] = {
'ceph.monitor.total_bytes': mon['kb_total'] * 1e3,
'ceph.monitor.used_bytes': mon['kb_used'] * 1e3,
'ceph.monitor.avail_bytes': mon['kb_avail'] * 1e3,
@@ -435,7 +439,7 @@
# monitors configured on the cluster
if len(mon_metrics) > 1:
for mon in ceph_status['health']['timechecks']['mons']:
mon_metrics[mon['name'].encode('ascii', 'ignore')].update({
mon_metrics[mon['name']].update({
'ceph.monitor.skew': mon['skew'],
'ceph.monitor.latency': mon['latency']
})
@@ -448,7 +452,8 @@
"""
osd_metrics = {}
for node in ceph_osd_df['nodes']:
osd_metrics[node['name'].encode('ascii', 'ignore')] = {
nodename = safe_decode(node['name'], incoming='utf-8')
osd_metrics[nodename] = {
'ceph.osd.crush_weight': node['crush_weight'],
'ceph.osd.depth': node['depth'],
'ceph.osd.reweight': node['reweight'],
@@ -501,7 +506,9 @@
stats = pool['stats']
total_bytes = stats['bytes_used'] + stats['max_avail']
utilization_perc = float(stats['bytes_used']) / total_bytes
pool_metrics[pool['name'].encode('ascii', 'ignore')] = {
pool['name'] = safe_decode(pool['name'], incoming='utf-8')
pool_metrics[pool['name']] = {
'ceph.pool.used_bytes': stats['bytes_used'],
'ceph.pool.used_raw_bytes': stats['raw_bytes_used'],
'ceph.pool.max_avail_bytes': stats['max_avail'],
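The recurring pattern in this file: on py2, name.encode('ascii', 'ignore') yielded a str usable as a dict key or in concatenation, but on py3 it yields bytes, and 'ceph.cluster.pgs.' + b'active' raises TypeError. encodeutils.safe_decode always returns text on both interpreters. A minimal sketch (requires oslo.utils):

```python
from oslo_utils import encodeutils

state_name = b'active+clean'  # parsed ceph output may arrive as bytes

name = encodeutils.safe_decode(state_name, incoming='utf-8')
print('ceph.cluster.pgs.' + name)  # text + text works on py2 and py3

# The old idiom breaks on py3, because encode() returns bytes:
# 'ceph.cluster.pgs.' + 'active+clean'.encode('ascii', 'ignore')  -> TypeError
```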

View File

@@ -33,6 +33,7 @@ import uuid
import logging
import logging.handlers
from numbers import Number
from oslo_utils import encodeutils
from six import integer_types
log = logging.getLogger(__name__)
@@ -418,7 +419,7 @@ def get_hostname():
p = subprocess.Popen(['/bin/hostname', '-f'], stdout=subprocess.PIPE)
out, err = p.communicate()
if p.returncode == 0:
return out.strip()
return encodeutils.safe_decode(out.strip(), incoming='utf-8')
except Exception:
return None
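subprocess pipes hand back bytes on py3, so the hostname helper now decodes before returning and callers can compare against text. The same pattern in isolation (assumes a hostname binary on PATH):

```python
import subprocess
from oslo_utils import encodeutils

p = subprocess.Popen(['hostname'], stdout=subprocess.PIPE)
out, _ = p.communicate()
if p.returncode == 0:
    # out is bytes on py3; decode so callers always get text
    print(encodeutils.safe_decode(out.strip(), incoming='utf-8'))
```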

View File

@@ -23,25 +23,25 @@ from monasca_agent.collector.checks_d import ceph
def mocked_check_output(args, shell=True, stderr=''):
output = ''
if '-f json df detail' in args:
output = file(os.path.dirname(os.path.abspath(__file__)) +
output = open(os.path.dirname(os.path.abspath(__file__)) +
'/fixtures/ceph/test_ceph-df.json')
elif '-f json status' in args:
output = file(os.path.dirname(os.path.abspath(__file__)) +
output = open(os.path.dirname(os.path.abspath(__file__)) +
'/fixtures/ceph/test_ceph-status.json')
elif 'status' in args:
output = file(os.path.dirname(os.path.abspath(__file__)) +
output = open(os.path.dirname(os.path.abspath(__file__)) +
'/fixtures/ceph/test_ceph-status.plain')
elif '-f json osd df' in args:
output = file(os.path.dirname(os.path.abspath(__file__)) +
output = open(os.path.dirname(os.path.abspath(__file__)) +
'/fixtures/ceph/test_ceph-osd-df.json')
elif '-f json osd perf' in args:
output = file(os.path.dirname(os.path.abspath(__file__)) +
output = open(os.path.dirname(os.path.abspath(__file__)) +
'/fixtures/ceph/test_ceph-osd-perf.json')
elif '-f json osd dump' in args:
output = file(os.path.dirname(os.path.abspath(__file__)) +
output = open(os.path.dirname(os.path.abspath(__file__)) +
'/fixtures/ceph/test_ceph-osd-dump.json')
elif '-f json osd pool stats' in args:
output = file(os.path.dirname(os.path.abspath(__file__)) +
output = open(os.path.dirname(os.path.abspath(__file__)) +
'/fixtures/ceph/test_ceph-osd-pool-stats.json')
else:
raise subprocess.CalledProcessError(1, cmd=args,
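The file() builtin no longer exists on py3; open() is the direct replacement and behaves the same for these fixtures. Wrapping it in a context manager also closes the handle deterministically, e.g. (fixture path is illustrative):

```python
import os

fixture = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                       'fixtures', 'ceph', 'test_ceph-df.json')

with open(fixture) as f:  # py2's file(fixture) is gone on py3
    output = f.read()
```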

View File

@@ -19,6 +19,8 @@ import tempfile
import os
import unittest
from oslo_utils import encodeutils
from monasca_agent.collector.checks_d import json_plugin
import monasca_agent.common.config
@@ -31,7 +33,7 @@ def _create_agent_conf():
tempdir = tempfile.mkdtemp()
conf_file = os.path.join(tempdir, 'agent.yaml')
with open(conf_file, 'wb') as fd:
fd.write(
fd.write(encodeutils.safe_encode(
"""
Logging:
collector_log_file: /var/log/monasca/agent/collector.log
@@ -42,7 +44,7 @@ def _create_agent_conf():
check_freq: 60
dimensions: {{}}
hostname: {hostname}
""".format(hostname=HOSTNAME)
""".format(hostname=HOSTNAME), incoming="utf-8")
)
config_obj = monasca_agent.common.config.Config(conf_file)
@@ -186,6 +188,11 @@ class JsonPluginCheckTest(unittest.TestCase):
file1 = os.path.join(tempdir, 'file1.json')
with open(file1, mode='w') as fd:
fd.write('{')
try:
with open(file1, 'r') as f:
json.load(f)
except (ValueError, TypeError) as e:
errmsg = 'failed parsing json: %s' % e
self.json_plugin.check({'dimensions': {},
'metrics_file': file1})
rmtree(tempdir, ignore_errors=True)
@@ -193,10 +200,7 @@ class JsonPluginCheckTest(unittest.TestCase):
fake_now = now
expected = [{'metric': 'monasca.json_plugin.status', 'value': 1,
'dimensions': {'hostname': HOSTNAME},
'value_meta': {
'msg': '%s: failed parsing json: Expecting'
' object: line 1'
' column 1 (char 0)' % file1}}]
'value_meta': {'msg': '%s: %s' % (file1, errmsg)}}]
differs = metricsDiffer(expected, self.json_plugin._metrics)
self.assertEqual('', differs, msg=differs)
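Two py3 issues are handled here: a file opened in 'wb' mode only accepts bytes, hence safe_encode around the formatted YAML; and json's parse-error wording differs between interpreter versions, so the expected message is now derived at runtime instead of hard-coding py2's text. A sketch of both, with hypothetical content:

```python
import json
import os
import tempfile
from oslo_utils import encodeutils

tempdir = tempfile.mkdtemp()
conf_file = os.path.join(tempdir, 'agent.yaml')
with open(conf_file, 'wb') as fd:  # binary mode: py3 rejects plain str
    fd.write(encodeutils.safe_encode(
        'Main:\n  hostname: {hostname}\n'.format(hostname='testbox'),
        incoming='utf-8'))

# Derive the expected error text from the running interpreter:
try:
    json.loads('{')
except (ValueError, TypeError) as e:
    errmsg = 'failed parsing json: %s' % e
print(errmsg)
```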

View File

@@ -13,7 +13,6 @@
# under the License.
import os
import contextlib
import logging
import unittest
import mock
@@ -57,13 +56,11 @@ class KibanaCheckTest(unittest.TestCase):
self.kibana_check.check(None)
self.assertEqual('An url to kibana must be specified',
err.exception.message)
str(err.exception))
def test_should_early_exit_if_all_metrics_disabled(self):
with contextlib.nested(
mock.patch.object(util, 'get_hostname'),
mock.patch.object(LOG, 'warning')
) as (_, mock_log_warning):
with mock.patch.object(util, 'get_hostname') as _,\
mock.patch.object(LOG, 'warning') as mock_log_warning:
self.kibana_check._get_kibana_version = mock.Mock()
self.kibana_check._get_data = mock.Mock()
self.kibana_check._process_metrics = mock.Mock()
@@ -80,11 +77,9 @@
'file, nothing to do.')
def test_failed_to_retrieve_data(self):
with contextlib.nested(
mock.patch.object(util, 'get_hostname'),
mock.patch.object(LOG, 'error'),
mock.patch.object(LOG, 'exception')
) as (_, mock_log_error, mock_log_exception):
with mock.patch.object(util, 'get_hostname') as _,\
mock.patch.object(LOG, 'error') as mock_log_error,\
mock.patch.object(LOG, 'exception') as mock_log_exception:
exception = Exception('oh')
self.kibana_check._get_data = mock.Mock(
side_effect=exception)
@@ -108,10 +103,8 @@
exception)
def test_empty_data_returned(self):
with contextlib.nested(
mock.patch.object(util, 'get_hostname'),
mock.patch.object(LOG, 'warning')
) as (_, mock_log_warning):
with mock.patch.object(util, 'get_hostname') as _, \
mock.patch.object(LOG, 'warning') as mock_log_warning:
self.kibana_check._get_data = mock.Mock(return_value=None)
self.kibana_check.check({
@@ -162,7 +155,7 @@ class KibanaCheckTest(unittest.TestCase):
def test_check(self):
fixture_file = os.path.dirname(
os.path.abspath(__file__)) + '/fixtures/test_kibana.json'
response = json.load(file(fixture_file))
response = json.load(open(fixture_file))
metrics = ['heap_size', 'heap_used', 'load',
'req_sec', 'resp_time_avg',
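contextlib.nested was removed in py3 (it was already deprecated in 2.7); the replacement used throughout these tests is a single with statement whose context managers are joined with backslash continuations. A minimal sketch with hypothetical patch targets:

```python
import mock  # or: from unittest import mock

class Service(object):
    def hostname(self):
        return 'real'
    def warn(self, msg):
        pass

svc = Service()

# py2-only: with contextlib.nested(p1, p2) as (a, b): ...
with mock.patch.object(Service, 'hostname') as mock_host, \
        mock.patch.object(Service, 'warn') as mock_warn:
    svc.hostname()
    mock_host.assert_called_once_with()
```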

View File

@@ -12,13 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import logging
import os
import unittest
import mock
import psutil
import json
import six
from monasca_setup.detection.plugins import kibana
@@ -66,18 +66,13 @@ class KibanaDetectionTest(unittest.TestCase):
kibana_plugin.available = False
psutil_mock = PSUtilGetProc()
process_iter_patch = mock.patch.object(psutil, 'process_iter',
return_value=[psutil_mock])
isfile_patch = mock.patch.object(os.path, 'isfile',
return_value=config_is_file)
deps_installed_patch = mock.patch.object(kibana_plugin,
'dependencies_installed',
return_value=deps_installed)
with contextlib.nested(process_iter_patch,
isfile_patch,
deps_installed_patch) as (
mock_process_iter, mock_isfile, mock_deps_installed):
with mock.patch.object(psutil, 'process_iter',
return_value=[psutil_mock]) as mock_process_iter, \
mock.patch.object(os.path, 'isfile',
return_value=config_is_file) as mock_isfile, \
mock.patch.object(kibana_plugin,
'dependencies_installed',
return_value=deps_installed) as mock_deps_installed:
kibana_plugin._detect()
self.assertTrue(mock_process_iter.called)
self.assertTrue(mock_isfile.called)
@@ -96,7 +91,7 @@ class KibanaDetectionTest(unittest.TestCase):
for instance in kibana_check['instances']:
self.assertIn('metrics', instance)
self.assertEqual(list, type(instance['metrics']))
self.assertItemsEqual(_KIBANA_METRICS, instance['metrics'])
six.assertCountEqual(self, _KIBANA_METRICS, instance['metrics'])
def _verify_process_conf(self, process_check, kibana_user):
# minimize check here, do not check how process should work
@@ -168,17 +163,11 @@ class KibanaDetectionTest(unittest.TestCase):
self.assertTrue(self.kibana_plugin.available)
def test_build_config_unreadable_config(self):
patch_log_error = mock.patch.object(LOG, 'error')
patch_log_exception = mock.patch.object(LOG, 'exception')
patch_read_config = mock.patch.object(self.kibana_plugin,
'_read_config',
side_effect=Exception('oh'))
with contextlib.nested(
patch_log_error,
patch_log_exception,
patch_read_config
) as (mock_log_error, mock_log_exception, _):
with mock.patch.object(LOG, 'error') as mock_log_error, \
mock.patch.object(LOG, 'exception') as mock_log_exception, \
mock.patch.object(self.kibana_plugin,
'_read_config',
side_effect=Exception('oh')) as _:
self.kibana_plugin.build_config()
self.assertEqual(mock_log_error.call_count, 1)
@@ -192,15 +181,11 @@ class KibanaDetectionTest(unittest.TestCase):
def test_build_config_https_support(self):
config = ('localhost', 5700, 'https')
patch_log_error = mock.patch.object(LOG, 'error')
patch_read_config = mock.patch.object(self.kibana_plugin,
'_read_config',
return_value=config)
with contextlib.nested(
patch_log_error,
patch_read_config
) as (mock_log_error, _):
with mock.patch.object(LOG, 'error') as mock_log_error, \
mock.patch.object(self.kibana_plugin,
'_read_config',
return_value=config) as _:
self.assertIsNone(self.kibana_plugin.build_config())
self.assertEqual(mock_log_error.call_count, 1)
@@ -209,19 +194,14 @@ class KibanaDetectionTest(unittest.TestCase):
def test_build_config_no_metric_support(self):
config = ('localhost', 5700, 'http')
patch_log_warning = mock.patch.object(LOG, 'warning')
patch_read_config = mock.patch.object(self.kibana_plugin,
'_read_config',
return_value=config)
has_metric_patch = mock.patch.object(self.kibana_plugin,
'_has_metrics_support',
return_value=False)
with contextlib.nested(
patch_log_warning,
patch_read_config,
has_metric_patch
) as (patch_log_warning, _, __):
with mock.patch.object(LOG, 'warning') as patch_log_warning,\
mock.patch.object(self.kibana_plugin,
'_read_config',
return_value=config) as _,\
mock.patch.object(self.kibana_plugin,
'_has_metrics_support',
return_value=False) as __:
self.assertIsNone(self.kibana_plugin.build_config())
self.assertEqual(patch_log_warning.call_count, 1)
@@ -249,30 +229,26 @@ class KibanaDetectionTest(unittest.TestCase):
fixture_file = (os.path.dirname(os.path.abspath(__file__))
+ '/../checks_d/fixtures/test_kibana.json')
response = json.load(file(fixture_file))
response = json.load(open(fixture_file))
get_metric_req_ret = mock.Mock(
wraps=JsonResponse(response)
)
patch_read_config = mock.patch.object(self.kibana_plugin,
'_read_config',
return_value=kibana_cfg)
has_metric_patch = mock.patch.object(self.kibana_plugin,
'_has_metrics_support',
return_value=True)
get_metrics_patch = mock.patch.object(self.kibana_plugin,
'_get_metrics_request',
return_value=get_metric_req_ret)
self.kibana_plugin.args = {'kibana-user': kibana_user}
with contextlib.nested(patch_read_config,
has_metric_patch,
get_metrics_patch):
with mock.patch.object(self.kibana_plugin,
'_read_config',
return_value=kibana_cfg) as patch_read_config,\
mock.patch.object(self.kibana_plugin,
'_has_metrics_support',
return_value=True) as has_metrics_patch,\
mock.patch.object(self.kibana_plugin,
'_get_metrics_request',
return_value=get_metric_req_ret) as get_metrics_patch:
conf = self.kibana_plugin.build_config()
self.assertIsNotNone(conf)
self.assertItemsEqual(['kibana', 'process'], conf.keys())
six.assertCountEqual(self, ['kibana', 'process'], conf.keys())
self._verify_kibana_conf(conf['kibana'], kibana_url)
self._verify_process_conf(conf['process'], kibana_user)
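assertItemsEqual was renamed to assertCountEqual on py3; six.assertCountEqual(self, a, b) calls whichever name the running interpreter provides, checking that both sequences hold the same elements regardless of order. For example:

```python
import six
import unittest

class ExampleTest(unittest.TestCase):
    def test_same_elements_any_order(self):
        # py2: self.assertItemsEqual(...)  py3: self.assertCountEqual(...)
        six.assertCountEqual(self, ['kibana', 'process'], ['process', 'kibana'])

if __name__ == '__main__':
    unittest.main()
```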

View File

@@ -17,6 +17,7 @@ import unittest
import logging
import mock
import psutil
import six
from monasca_setup.detection.plugins import mon
@@ -185,8 +186,12 @@ class TestMonPersisterDetectionPlugin(unittest.TestCase):
admin_port=6666
)
mod = "builtins.open"
if six.PY2:
mod = "__builtin__.open"
with mock.patch(
"__builtin__.open",
mod,
mock.mock_open(read_data=yml_cfg)) as mf:
self._detect(retval=[fake_processes])
mf.assert_called_once_with('/etc/monasca/persister-config.yml',
@@ -255,8 +260,12 @@ class TestMonPersisterDetectionPlugin(unittest.TestCase):
admin_port=admin_port
)
mod = "builtins.open"
if six.PY2:
mod = "__builtin__.open"
with mock.patch(
"__builtin__.open",
mod,
mock.mock_open(read_data=yml_cfg)) as mf:
self._detect(retval=[fake_processes])
conf = self._build_config()
@@ -491,8 +500,12 @@ class TestMonAPIDetectionPlugin(unittest.TestCase):
hibernate_enabled=False
)
mod = "builtins.open"
if six.PY2:
mod = "__builtin__.open"
with mock.patch(
"__builtin__.open",
mod,
mock.mock_open(read_data=yml_cfg)) as mock_file:
self._detect(retval=[fake_processes])
mock_file.assert_called_once_with('/etc/monasca/api-config.yml',
@@ -606,8 +619,12 @@ class TestMonAPIDetectionPlugin(unittest.TestCase):
hibernate_enabled=hibernate_enabled
)
mod = "builtins.open"
if six.PY2:
mod = "__builtin__.open"
with mock.patch(
"__builtin__.open",
mod,
mock.mock_open(read_data=yml_cfg)) as mf:
self._detect(retval=[fake_processes])
conf = self._build_config()
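The open builtin lives in the __builtin__ module on py2 but in builtins on py3, so the patch target is chosen at runtime via six.PY2. The pattern in isolation, with hypothetical file content:

```python
import mock
import six

yml_cfg = 'metricsPort: 8091\n'  # hypothetical config body

mod = "builtins.open"
if six.PY2:
    mod = "__builtin__.open"

with mock.patch(mod, mock.mock_open(read_data=yml_cfg)) as mf:
    with open('/etc/monasca/persister-config.yml') as f:
        data = f.read()
mf.assert_called_once_with('/etc/monasca/persister-config.yml')
print(data)
```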

View File

@@ -11,7 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import logging
import os
import psutil
@@ -61,17 +60,17 @@ class TestOvs(unittest.TestCase):
def _detect(self, ovs_obj, file_config_valid=True):
ovs_obj.neutron_conf = None
ovs_obj.available = False
with contextlib.nested(
patch.object(cfg, 'CONF'),
with patch.object(cfg, 'CONF') as mock_conf, \
patch.object(psutil, 'process_iter',
return_value=[ps_util_get_proc()]),
patch.object(os.path, 'isfile', return_value=True),
patch.object(ovs_obj, 'dependencies_installed',
return_value=True),
return_value=[ps_util_get_proc()]) \
as mock_process_iter, \
patch.object(os.path, 'isfile', return_value=True) \
as mock_isfile,\
patch.object(ovs_obj, 'dependencies_installed', return_value=True) \
as dependencies,\
patch.object(ovs_obj, '_is_neutron_conf_valid',
return_value=file_config_valid)) as (
mock_conf, mock_process_iter,
mock_isfile, dependencies, _):
return_value=file_config_valid) as _:
ovs_obj._detect()
self.assertTrue(mock_process_iter.called)
if not ps_util_get_proc.cmdLine:
@@ -83,15 +82,13 @@ class TestOvs(unittest.TestCase):
ovs_obj.conf.default_config_dirs = os.path.abspath(os.path.join(ovs_obj.neutron_conf, os.pardir))
with patch.object(configparser, 'SafeConfigParser') as mock_config_parser:
config_parser_obj = mock_config_parser.return_value
with contextlib.nested(
patch.object(cfg, 'CONF'),
patch.object(LOG, 'info'),
with patch.object(cfg, 'CONF') as mock_conf, \
patch.object(LOG, 'info') as mock_log_info,\
patch.object(ovs_obj, 'has_option',
side_effect=self.has_option),
side_effect=self.has_option) as mock_has_option, \
patch.object(ovs_obj, 'get_option',
side_effect=self.get_value)) as (
mock_conf, mock_log_info,
mock_has_option, mock_get):
side_effect=self.get_value) as mock_get:
result = ovs_obj.build_config()
if dependencies_installed:
self.assertTrue(mock_log_info.called)
@@ -181,17 +178,15 @@ class TestOvs(unittest.TestCase):
def test_detect_conf_file_path_given(self):
self.ovs_obj.neutron_conf = None
self.ovs_obj.args = {'conf_file_path': '/opt/stack/neutron.conf'}
with contextlib.nested(
patch.object(utils, 'load_oslo_configuration'),
with patch.object(utils, 'load_oslo_configuration') as mock_conf, \
patch.object(psutil, 'process_iter',
return_value=[ps_util_get_proc()]),
patch.object(os.path, 'isfile', return_value=True),
return_value=[ps_util_get_proc()]) as mock_process_iter, \
patch.object(os.path, 'isfile', return_value=True) as mock_isfile, \
patch.object(self.ovs_obj, 'dependencies_installed',
return_value=True),
return_value=True) as dependencies, \
patch.object(self.ovs_obj, '_is_neutron_conf_valid',
return_value=True)) as (
mock_conf, mock_process_iter,
mock_isfile, dependencies, _):
return_value=True) as _:
self.ovs_obj._detect()
self.assertTrue(mock_isfile.called)
self.assertTrue(self.ovs_obj.available)
@@ -285,10 +280,9 @@ class TestOvs(unittest.TestCase):
self.ovs_obj.neutron_conf = 'neutron-conf'
self.ovs_obj.args = {'included_interface_re': '[',
'neutron_refresh': 13000}
with contextlib.nested(
patch.object(re, 'compile', side_effect=re.error),
patch.object(LOG, 'exception')) as (
mock_re_error, mock_log):
with patch.object(re, 'compile', side_effect=re.error('error')) as mock_re_error, \
patch.object(LOG, 'exception') as mock_log:
self.assertRaises(Exception, self._build_config_with_arg, self.ovs_obj)
self.assertTrue(mock_re_error.called)
self.assertTrue(mock_log.called)
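Besides the contextlib.nested rewrite, note side_effect=re.error('error'): on py3, re.error's constructor requires a message argument, so handing mock the bare class (which it would instantiate with no arguments when raising) fails with TypeError; a pre-built instance works on both interpreters. Sketch:

```python
import mock
import re

with mock.patch.object(re, 'compile',
                       side_effect=re.error('error')) as mock_compile:
    try:
        re.compile('[')  # the mock raises our pre-built instance
    except re.error as exc:
        print('caught: %s' % exc)
```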

View File

@@ -11,7 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import logging
import os
import psutil
@@ -56,9 +55,8 @@ class TestProcessCheck(unittest.TestCase):
isfile_patch = patch.object(os.path, 'isfile',
return_value=config_is_file)
with contextlib.nested(process_iter_patch,
isfile_patch) as (
mock_process_iter, mock_isfile):
with process_iter_patch as mock_process_iter, \
isfile_patch as mock_isfile:
proc_plugin._detect()
if by_process_name:
self.assertTrue(mock_process_iter.called)
@@ -159,10 +157,11 @@
'vertica')
def test_input_yaml_file(self):
# note: The previous tests will cover all yaml data variations, since the data is translated into a single dictionary.
fd, temp_path = tempfile.mkstemp(suffix='.yaml')
os.write(fd, '---\nprocess_config:\n- process_username: dbadmin\n dimensions:\n '
'service: monitoring\n component: vertica\n')
# note: The previous tests will cover all yaml data variations, since
# the data is translated into a single dictionary.
fd, temp_path = tempfile.mkstemp(suffix='.yaml', text=True)
os.write(fd, b'---\nprocess_config:\n- process_username: dbadmin\n dimensions:\n '
b'service: monitoring\n component: vertica\n')
self.proc_plugin.args = {'conf_file_path': temp_path}
self.proc_plugin._detect()
result = self.proc_plugin.build_config()
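os.write() writes to a raw file descriptor and accepts only bytes on py3, hence the b'...' literals; text=True on mkstemp merely opens the descriptor in text mode on platforms that distinguish the two. In isolation:

```python
import os
import tempfile

fd, temp_path = tempfile.mkstemp(suffix='.yaml', text=True)
os.write(fd, b'---\nprocess_config:\n- process_username: dbadmin\n')
os.close(fd)

with open(temp_path) as f:
    print(f.read())
os.remove(temp_path)
```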

View File

@@ -276,7 +276,7 @@ class TestMetricsAggregator(unittest.TestCase):
dimensions = {'A': 'B', 'B': 'C', 'D': 'E'}
value_meta_value = ""
num_value_meta = 10
for i in range(0, metric_validator.VALUE_META_VALUE_MAX_LENGTH/num_value_meta):
for i in range(0, metric_validator.VALUE_META_VALUE_MAX_LENGTH//num_value_meta):
value_meta_value = '{}{}'.format(value_meta_value, '1')
value_meta = {}
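py3's / always produces a float and range() rejects floats, so the loop bound switches to floor division //, which keeps py2's integer semantics. For example (constant value is illustrative):

```python
VALUE_META_VALUE_MAX_LENGTH = 2048  # illustrative value
num_value_meta = 10

print(VALUE_META_VALUE_MAX_LENGTH / num_value_meta)   # 204.8  (float on py3)
print(VALUE_META_VALUE_MAX_LENGTH // num_value_meta)  # 204    (int, range-safe)

for i in range(0, VALUE_META_VALUE_MAX_LENGTH // num_value_meta):
    pass
```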

View File

@@ -86,11 +86,16 @@ class TestDynamicCheckHelper(unittest.TestCase):
def testMeasurements(self):
metrics = self.run_check()
for m in metrics:
print "metric: {0}, dimensions: {1}".format(m['measurement']['name'], repr(m['measurement']['dimensions']))
metric1 = sorted(filter(lambda m: m['measurement']['name'] == 'dynhelper.messages_avg', metrics))
metric2 = sorted(filter(lambda m: m['measurement']['name'] == 'dynhelper.messages_total', metrics))
metric3 = sorted(filter(lambda m: m['measurement']['name'] == 'dynhelper.testgroup.req_responses_ok', metrics))
metric4 = sorted(filter(lambda m: m['measurement']['name'] == 'dynhelper.testgroup.sec_auth_total', metrics))
print("metric: {0}, dimensions: {1}".format(m['measurement']['name'], repr(m['measurement']['dimensions'])))
def sortfilter(name):
filtered = filter(lambda m: m['measurement']['name'] == name, metrics)
return sorted(filtered, key=lambda m: m['measurement']['timestamp'])
metric1 = sortfilter('dynhelper.messages_avg')
metric2 = sortfilter('dynhelper.messages_total')
metric3 = sortfilter('dynhelper.testgroup.req_responses_ok')
metric4 = sortfilter('dynhelper.testgroup.sec_auth_total')
self.assertTrue(len(metric1) > 0,
'gauge dynhelper.messages_avg missing in metric list {0}'.format(repr(metrics)))
self.assertEqual(metric1[0]['measurement']['dimensions'],
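Two py3 changes meet here: print is a function, and filter() returns a lazy iterator of dicts, which py3 refuses to sort without an explicit key (dicts lost their arbitrary ordering comparisons). The sortfilter helper factors that out; a standalone sketch:

```python
metrics = [  # hypothetical measurements
    {'measurement': {'name': 'dynhelper.messages_avg', 'timestamp': 2}},
    {'measurement': {'name': 'dynhelper.messages_avg', 'timestamp': 1}},
]

def sortfilter(name):
    filtered = filter(lambda m: m['measurement']['name'] == name, metrics)
    # py3: dicts are unorderable, so sorted() needs an explicit key
    return sorted(filtered, key=lambda m: m['measurement']['timestamp'])

print([m['measurement']['timestamp']
       for m in sortfilter('dynhelper.messages_avg')])  # [1, 2]
```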

View File

@@ -13,6 +13,7 @@
import mock
import os.path
import six
import tempfile
import unittest
@@ -80,7 +81,7 @@ class TestConfig(unittest.TestCase):
conf_file = os.path.join(tempdir, 'agent.yaml')
with open(conf_file, 'wb') as fd:
fd.write(
"""
b"""
Logging:
collector_log_file: /var/log/monasca/agent/collector.log
forwarder_log_file: /var/log/monasca/agent/forwarder.log
@@ -127,5 +128,5 @@ class TestConfig(unittest.TestCase):
def test_verify_common_config_opts(self):
opts = util.get_parsed_args(prog='test')
opts_dict = vars(opts[0])
self.assertItemsEqual(['config_file', 'clean', 'verbose'],
opts_dict.keys())
six.assertCountEqual(self, ['config_file', 'clean', 'verbose'],
opts_dict.keys())
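Same binary-mode rule as in the json_plugin test: 'wb' needs bytes. Here the YAML body is static ASCII, so a plain b"""...""" literal suffices and no safe_encode call is needed. A compact sketch (path is illustrative):

```python
with open('agent.yaml', 'wb') as fd:
    # static ASCII content: a bytes literal avoids an explicit encode step
    fd.write(b"""
Logging:
  collector_log_file: /var/log/monasca/agent/collector.log
""")
```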

View File

@@ -69,7 +69,7 @@ class TestMetrics(unittest.TestCase):
counter = metrics.Counter(metric_name, dimensions, tenant_name)
# single counter value
counter.sample(5, SAMPLE_RATE, 1)
envelope = counter.flush()[0]
@@ -80,9 +80,9 @@ class TestMetrics(unittest.TestCase):
self.assertEqual(measurement['value'], 5)
self.assertEqual(measurement['timestamp'], 1000)
# multiple counter value with different timestamps: add
counter.sample(5, SAMPLE_RATE, 1)
counter.sample(6, SAMPLE_RATE, 2)
envelope = counter.flush()[0]
measurement = envelope['measurement']
@@ -116,11 +116,11 @@ class TestMetrics(unittest.TestCase):
rate = metrics.Rate(metric_name, dimensions, tenant_name)
# single sample without predecessor: no rate can be calculated
rate.sample(5, SAMPLE_RATE, 1)
self.assertEqual(rate.flush(), [])
# zero difference between samples: rate 0
rate.sample(5, SAMPLE_RATE, 2)
envelope = rate.flush()[0]
@@ -131,7 +131,7 @@ class TestMetrics(unittest.TestCase):
self.assertEqual(measurement['value'], 0.0)
self.assertEqual(measurement['timestamp'], 2000)
# samples (5,10) in 1 sec interval: rate 5/sec.
rate.sample(10, SAMPLE_RATE, 3)
envelope = rate.flush()[0]
@@ -142,11 +142,11 @@ class TestMetrics(unittest.TestCase):
self.assertEqual(measurement['value'], 5)
self.assertEqual(measurement['timestamp'], 3000)
# conflicting values for same timestamp: no result, but keep last sample for next rate calc.
rate.sample(12, SAMPLE_RATE, 3)
self.assertEqual(rate.flush(), [])
# zero difference between samples, incomplete previous interval T: rate 0/sec.
rate.sample(12, SAMPLE_RATE, 4)
envelope = rate.flush()[0]
@@ -157,9 +157,9 @@ class TestMetrics(unittest.TestCase):
self.assertEqual(measurement['value'], 0.0)
self.assertEqual(measurement['timestamp'], 4000)
# several samples (13, 14) in interval, take last values of T1 and T0 for rate calc: rate = (14-12)/(6-4)
rate.sample(13, SAMPLE_RATE, 5)
rate.sample(14, SAMPLE_RATE, 6)
envelope = rate.flush()[0]
measurement = envelope['measurement']
@@ -169,7 +169,7 @@ class TestMetrics(unittest.TestCase):
self.assertEqual(measurement['value'], 1)
self.assertEqual(measurement['timestamp'], 6000)
# negative rate: often result of a restart, but that should not be hidden
rate.sample(1, SAMPLE_RATE, 7)
envelope = rate.flush()[0]
@@ -180,8 +180,8 @@ class TestMetrics(unittest.TestCase):
self.assertEqual(measurement['value'], -13)
self.assertEqual(measurement['timestamp'], 7000)
# recover from negative rate
rate.sample(2, SAMPLE_RATE, 8)
envelope = rate.flush()[0]
measurement = envelope['measurement']
@@ -190,4 +190,3 @@ class TestMetrics(unittest.TestCase):
self.assertEqual(measurement['dimensions'], dimensions)
self.assertEqual(measurement['value'], 1)
self.assertEqual(measurement['timestamp'], 8000)

View File

@@ -28,6 +28,12 @@ commands =
{[testenv]commands}
stestr run {posargs}
[testenv:py36]
basepython = python3.6
commands =
{[testenv]commands}
stestr run {posargs}
[testenv:cover]
basepython = python3
commands =