fix tox python3 overrides

We want to default to running all tox environments under python 3, so
set the basepython value in each environment.

We do not want to specify a minor version number, because we do not
want to have to update the file every time we upgrade python.

We do not want to set the override once in [testenv], because that
would break the more specific interpreter versions used by the default
environments such as py35 and py36.
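
As an illustration only, here is a minimal tox.ini sketch of the pattern this
change adopts (a single example section, not the project's full environment
list). Each non-factor environment pins its own interpreter, so the py35/py36
factor environments keep the minor versions implied by their names:

[testenv:flake8]
basepython = python3
commands = flake8 monasca_agent

# By contrast, a blanket "basepython = python3" placed in [testenv] would be
# inherited by py35 and py36 and override their version-specific interpreters.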

Change-Id: I12967d5f5e707efe2b271b28bc7ea4b40e7f1c15
commit 96f08da015
parent 89e563194e
Author: Adrian Czarnecki
Date:   2018-06-12 13:01:54 +02:00

17 changed files with 49 additions and 35 deletions

@@ -16,6 +16,7 @@ import os
 import re
 from datetime import datetime
+from six import text_type
 import monasca_agent.collector.checks as checks
@@ -77,7 +78,7 @@ class Crash(checks.AgentCheck):
         # Return the date-/timestamp of the most recent crash
         if dump_count == 1:
-            value_meta = {'latest': unicode(dt)}
+            value_meta = {'latest': text_type(dt)}
         log.debug('dump_count: %s', dump_count)
         self.gauge('crash.dump_count', dump_count, dimensions=dimensions,

@@ -147,7 +147,7 @@ class Disk(checks.AgentCheck):
         # automatically ignore filesystems not backed by a device
         try:
-            for nodevfs in filter(lambda x: x.startswith('nodev\t'), file('/proc/filesystems')):
+            for nodevfs in filter(lambda x: x.startswith('nodev\t'), open('/proc/filesystems')):
                 file_system_list.add(nodevfs.partition('\t')[2].strip())
         except IOError:
             log.debug('Failed reading /proc/filesystems')

@@ -204,10 +204,10 @@ class Docker(checks.AgentCheck):
                         container_network_dimensions['interface'] = interface_name
                         network_values = cols[1].split()
                         self._report_rate_gauge_metric(
-                            "container.net.in_bytes", long(
+                            "container.net.in_bytes", int(
                                 network_values[0]), container_network_dimensions)
                         self._report_rate_gauge_metric(
-                            "container.net.out_bytes", long(
+                            "container.net.out_bytes", int(
                                 network_values[8]), container_network_dimensions)
                         break
         except Exception as e:

@@ -16,6 +16,8 @@ from glob import glob
 import os
 import time
+from six import moves
 try:
     from xml.etree.ElementTree import ElementTree
 except ImportError:
@@ -101,7 +103,7 @@ class Jenkins(AgentCheck):
         if len(dirs) > 0:
             dirs = sorted(dirs, reverse=True)
             # We try to get the last valid build
-            for index in xrange(0, len(dirs) - 1):
+            for index in moves.range(0, len(dirs) - 1):
                 dir_name = dirs[index]
                 try:
                     timestamp = self._extract_timestamp(dir_name)

@@ -52,7 +52,7 @@ class WrapMK(AgentCheck):
         s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
         try:
             s.connect(socket_path)
-        except socket.error, (err):
+        except socket.error as err:
            self.log.error("Error connecting to {0}: {1}".format(socket_path,
                                                                 str(err)))
            sys.exit(1)

@@ -17,8 +17,9 @@ import subprocess
 import sys
 import traceback
-import monasca_agent.collector.checks as checks
+from six import text_type
+import monasca_agent.collector.checks as checks
 GAUGE = "gauge"
 RATE = "rate"
@@ -254,7 +255,7 @@ class MySql(checks.AgentCheck):
         return self._collect_type(key, dict, float)
     def _collect_string(self, key, dict):
-        return self._collect_type(key, dict, unicode)
+        return self._collect_type(key, dict, text_type)
     def _collect_type(self, key, dict, the_type):
         self.log.debug("Collecting data with %s" % key)
@@ -307,7 +308,7 @@ class MySql(checks.AgentCheck):
     def _collect_system_metrics(self, host, db, dimensions):
         pid = None
         # The server needs to run locally, accessed by TCP or socket
-        if host in ["localhost", "127.0.0.1"] or db.port == long(0):
+        if host in ["localhost", "127.0.0.1"] or db.port == int(0):
             pid = self._get_server_pid(db)
         if pid:

@@ -35,9 +35,9 @@ class Varnish(AgentCheck):
         if name == "stat":
             m_name = self.normalize(self._current_metric)
             if self._current_type in ("a", "c"):
-                self.rate(m_name, long(self._current_value))
+                self.rate(m_name, int(self._current_value))
             elif self._current_type in ("i", "g"):
-                self.gauge(m_name, long(self._current_value))
+                self.gauge(m_name, int(self._current_value))
             else:
                 # Unsupported data type, ignore
                 self._reset()
@@ -53,7 +53,7 @@ class Varnish(AgentCheck):
         data = data.strip()
         if len(data) > 0 and self._current_element != "":
             if self._current_element == "value":
-                self._current_value = long(data)
+                self._current_value = int(data)
             elif self._current_element == "flag":
                 self._current_type = data
             else:

@@ -222,8 +222,8 @@ class VCenterCheck(AgentCheck):
     def _process_storage_data(self, datastore_stats, managed_cluster):
         shared_ds = self._get_shared_datastores(datastore_stats,
                                                 managed_cluster)
-        capacity = 0L
-        freeSpace = 0L
+        capacity = 0
+        freeSpace = 0
         self.log.info("Polling for the datastores: " + str(shared_ds))
         for object_contents in datastore_stats:
             for object_content in object_contents[1]:
@@ -237,12 +237,12 @@ class VCenterCheck(AgentCheck):
                                        "of datastore: %s in cluster: "
                                        "%s" %
                                        (ds_mor, managed_cluster))
-                        capacity += long(propSet.val)
+                        capacity += int(propSet.val)
                     elif propSet.name == 'summary.freeSpace':
                         self.log.debug("Calculating freeSpace of "
                                        "datastore: %s in cluster: %s"
                                        % (ds_mor, managed_cluster))
-                        freeSpace += long(propSet.val)
+                        freeSpace += int(propSet.val)
         usedSpace = capacity - freeSpace
         self.log.debug("Total capacity:" + str(capacity) +
                        " used:" + str(usedSpace) + " free:" + str(freeSpace))

@@ -136,11 +136,11 @@ class Zookeeper(AgentCheck):
         # Received: 101032173
         _, value = buf.readline().split(':')
-        metrics.append(('zookeeper.in_bytes', long(value.strip())))
+        metrics.append(('zookeeper.in_bytes', int(value.strip())))
         # Sent: 1324
         _, value = buf.readline().split(':')
-        metrics.append(('zookeeper.out_bytes', long(value.strip())))
+        metrics.append(('zookeeper.out_bytes', int(value.strip())))
         if has_connections_val:
             # Connections: 1
@@ -153,12 +153,12 @@ class Zookeeper(AgentCheck):
         # Outstanding: 0
         _, value = buf.readline().split(':')
-        metrics.append(('zookeeper.outstanding_bytes', long(value.strip())))
+        metrics.append(('zookeeper.outstanding_bytes', int(value.strip())))
         # Zxid: 0x1034799c7
         _, value = buf.readline().split(':')
         # Parse as a 64 bit hex int
-        zxid = long(value.strip(), 16)
+        zxid = int(value.strip(), 16)
         # convert to bytes
         zxid_bytes = struct.pack('>q', zxid)
         # the higher order 4 bytes is the epoch
@@ -175,6 +175,6 @@ class Zookeeper(AgentCheck):
         # Node count: 487
         _, value = buf.readline().split(':')
-        metrics.append(('zookeeper.node_count', long(value.strip())))
+        metrics.append(('zookeeper.node_count', int(value.strip())))
         return metrics, dimensions

@@ -103,7 +103,7 @@ class JMXFetch(object):
             'timestamp': time.time(),
             'invalid_checks': invalid_checks
         }
-        stream = file(os.path.join(tempfile.gettempdir(), PYTHON_JMX_STATUS_FILE), 'w')
+        stream = open(os.path.join(tempfile.gettempdir(), PYTHON_JMX_STATUS_FILE), 'w')
         yaml.dump(data, stream)
         stream.close()

@@ -14,6 +14,7 @@
 # under the License.
 """Implementation of Inspector abstraction for Hyper-V"""
 from oslo_utils import units
 from monasca_agent.collector.virt.hyperv import utilsv2
@@ -40,7 +41,7 @@ class HyperVInspector(virt_inspector.Inspector):
         cpu_percent_used = (cpu_clock_used /
                             float(host_cpu_clock * cpu_count))
         # Nanoseconds
-        cpu_time = (long(uptime * cpu_percent_used) * units.k)
+        cpu_time = (int(uptime * cpu_percent_used) * units.k)
         return virt_inspector.CPUStats(number=cpu_count, time=cpu_time)

@@ -86,11 +86,11 @@ class UtilsV2(object):
         cpu_used = 0
         if cpu_metric_aggr:
-            cpu_used = long(cpu_metric_aggr[0].MetricValue)
+            cpu_used = int(cpu_metric_aggr[0].MetricValue)
         return (cpu_used,
                 int(cpu_sd.VirtualQuantity),
-                long(vm.OnTimeInMilliseconds))
+                int(vm.OnTimeInMilliseconds))
     def get_memory_metrics(self, vm_name):
         vm = self._lookup_vm(vm_name)
@@ -98,7 +98,7 @@ class UtilsV2(object):
         metric_memory = self._get_metrics(vm, memory_def)
         memory_usage = 0
         if metric_memory:
-            memory_usage = long(metric_memory[0].MetricValue)
+            memory_usage = int(metric_memory[0].MetricValue)
         return memory_usage
     def get_vnic_metrics(self, vm_name):
@@ -180,7 +180,7 @@ class UtilsV2(object):
     def _sum_metric_values(metrics):
         tot_metric_val = 0
         for metric in metrics:
-            tot_metric_val += long(metric.MetricValue)
+            tot_metric_val += int(metric.MetricValue)
         return tot_metric_val
     def _sum_metric_values_by_defs(self, element_metrics, metric_defs):

@@ -129,7 +129,7 @@ class XenapiInspector(virt_inspector.Inspector):
         metrics_rec = self._call_xenapi("VM_metrics.get_record",
                                         metrics_ref)
         # Stat provided from XenServer is in B, converting it to MB.
-        memory = long(metrics_rec['memory_actual']) / units.Mi
+        memory = int(metrics_rec['memory_actual']) / units.Mi
         return virt_inspector.MemoryUsageStats(usage=memory)
     def inspect_vnic_rates(self, instance, duration=None):

@@ -132,9 +132,9 @@ class Daemon(object):
         # Redirect standard file descriptors
         sys.stdout.flush()
         sys.stderr.flush()
-        si = file(self.stdin, 'r')
-        so = file(self.stdout, 'a+')
-        se = file(self.stderr, 'a+', 0)
+        si = open(self.stdin, 'r')
+        so = open(self.stdout, 'a+')
+        se = open(self.stderr, 'a+', 0)
         os.dup2(si.fileno(), sys.stdin.fileno())
         os.dup2(so.fileno(), sys.stdout.fileno())
         os.dup2(se.fileno(), sys.stderr.fileno())
@@ -251,7 +251,7 @@ class Daemon(object):
     def pid(self):
         # Get the pid from the pidfile
         try:
-            pf = file(self.pidfile, 'r')
+            pf = open(self.pidfile, 'r')
             pid = int(pf.read().strip())
             pf.close()
             return pid

@@ -32,6 +32,7 @@ import uuid
 import logging
 import logging.handlers
+from six import integer_types
 from numbers import Number
@@ -44,7 +45,7 @@ VALID_HOSTNAME_RFC_1123_PATTERN = re.compile(
 MAX_HOSTNAME_LEN = 255
 LOGGING_MAX_BYTES = 5 * 1024 * 1024
-NumericTypes = (float, int, long)
+NumericTypes = (float,) + integer_types
 import monasca_agent.common.config as configuration
 from monasca_agent.common.exceptions import PathNotFound

@@ -24,6 +24,8 @@
 import signal
 import socket
 import sys
+from six import text_type
 # set up logging before importing any other components
 import monasca_agent.common.util as util
 util.initialize_logging('forwarder')
@@ -43,6 +45,7 @@ import tornado.web
 import monasca_agent.common.config as cfg
 import monasca_agent.forwarder.api.monasca_api as mon
 log = logging.getLogger('forwarder')
 # Max amount of iterations to wait to meet min batch size before flushing
@@ -219,10 +222,10 @@ def main():
     skip_ssl_validation = False
     use_simple_http_client = False
-    if unicode(tornado.options.options.sslcheck) == u"0":
+    if text_type(tornado.options.options.sslcheck) == u"0":
         skip_ssl_validation = True
-    if unicode(tornado.options.options.use_simple_http_client) == u"1":
+    if text_type(tornado.options.options.use_simple_http_client) == u"1":
         use_simple_http_client = True
     # If we don't have any arguments, run the server.
# If we don't have any arguments, run the server.

@@ -37,6 +37,7 @@ commands =
   coverage report
 [testenv:bandit]
+basepython = python3
 commands =
   # B101 - asserts used on purpose
   # Following rules should be fixed in future
@@ -60,17 +61,20 @@ commands =
   bandit -r monasca_agent -n5 -s B101,B602,B603,B301,B303,B311,B403,B404,B405,B310,B320,B410,B411,B501,B504,B605,B607,B608 -x {toxinidir}/tests
 [testenv:flake8]
+basepython = python3
 commands =
   flake8 monasca_agent
   flake8 monasca_setup
   flake8 tests
 [testenv:pep8]
+basepython = python3
 commands =
   {[testenv:flake8]commands}
   {[testenv:bandit]commands}
 [testenv:venv]
+basepython = python3
 commands = {posargs}
 [testenv:bindep]
@@ -78,6 +82,7 @@ commands = {posargs}
 # system dependencies are missing, since it's used to tell you what system
 # dependencies are missing! This also means that bindep must be installed
 # separately, outside of the requirements files.
+basepython = python3
 deps = bindep
 commands = bindep test