Add python syntax check using OpenStack hacking rules

Change-Id: I42db1b87e216383009dbadc4fec4a1a7e580d898
This commit is contained in:
Swann Croiset 2016-02-11 16:19:41 +01:00
parent 1f54a530d1
commit 952fd4938d
13 changed files with 50 additions and 37 deletions

View File

@ -24,8 +24,7 @@ import collectd
class Base(object):
""" Base class for writing Python plugins.
"""
"""Base class for writing Python plugins."""
MAX_IDENTIFIER_LENGTH = 63
@ -54,8 +53,7 @@ class Base(object):
return
def itermetrics(self):
"""
Iterate over the collected metrics
"""Iterate over the collected metrics
This class must be implemented by the subclass and should yield dict
objects that represent the collected values. Each dict has 3 keys:
@ -96,8 +94,7 @@ class Base(object):
v.dispatch()
def execute(self, cmd, shell=True, cwd=None):
"""
Executes a program with arguments.
"""Executes a program with arguments.
Args:
cmd: a list of program arguments where the first item is the
@ -150,8 +147,7 @@ class Base(object):
return (stdout, stderr)
def execute_to_json(self, *args, **kwargs):
"""
Executes a program and decodes the output as a JSON string.
"""Executes a program and decodes the output as a JSON string.
See execute().
@ -165,8 +161,7 @@ class Base(object):
@staticmethod
def restore_sigchld():
"""
Restores the SIGCHLD handler for Python <= v2.6.
"""Restores the SIGCHLD handler for Python <= v2.6.
This should be provided to collectd as the init callback by plugins
that execute external programs.

View File

@ -49,7 +49,7 @@ class CephPerfSchema(object):
(stdout, stderr) = proc.communicate()
stdout = stdout.rstrip('\n')
except Exception as e:
print "Cannot execute command '%s': %s" % (cmd, str(e))
print("Cannot execute command '%s': %s" % (cmd, str(e)))
raise e
return json.loads(stdout)
@ -69,15 +69,15 @@ class CephPerfSchema(object):
def main():
script_name = os.path.basename(sys.argv[0])
if len(sys.argv) < 2 or len(sys.argv) > 3:
print "usage: %s <Ceph OSD socket> [namespace]" % script_name
print("usage: %s <Ceph OSD socket> [namespace]" % script_name)
else:
schema = CephPerfSchema(sys.argv[1])
collection = sys.argv[2] if len(sys.argv) == 3 else None
print "# File generated automatically by the %s script" % script_name
print "# Ceph version: %s" % schema.ceph_version()
print("# File generated automatically by the %s script" % script_name)
print("# Ceph version: %s" % schema.ceph_version())
for item in schema.itertypes():
if collection is None or item.collection == collection:
print item
print(item)
if __name__ == '__main__':
main()

View File

@ -24,8 +24,7 @@ RE_OSD_ID = re.compile(".*?osd\.(\d+)\.asok$")
class CephOSDPerfPlugin(base.CephBase):
""" Collect OSD performance counters of all OSD daemons running on the host.
"""
"""Collect OSD performance counters of OSD daemons running on the host."""
# Collect only metrics from the 'osd' namespace
PREFIXES = ('osd')
@ -46,9 +45,10 @@ class CephOSDPerfPlugin(base.CephBase):
@staticmethod
def convert_to_collectd_value(value):
# See for details
# https://www.mail-archive.com/ceph-users@lists.ceph.com/msg18705.html
if isinstance(value, dict):
if value['avgcount'] > 0:
# See https://www.mail-archive.com/ceph-users@lists.ceph.com/msg18705.html
return value['sum'] / value['avgcount']
else:
return 0.0

View File

@ -26,8 +26,7 @@ HEALTH_MAP = {
class CephMonPlugin(base.CephBase):
""" Collect states and information about ceph cluster and placement groups.
"""
""" Collect states and metrics about ceph cluster and placement groups."""
def __init__(self, *args, **kwargs):
super(CephMonPlugin, self).__init__(*args, **kwargs)

View File

@ -24,13 +24,13 @@ INTERVAL = openstack.INTERVAL
class APICheckPlugin(openstack.CollectdPlugin):
""" Class to check the status of OpenStack API services.
"""
"""Class to check the status of OpenStack API services."""
FAIL = 0
OK = 1
UNKNOWN = 2
# TODO: sahara, murano
# TODO(all): sahara, murano
CHECK_MAP = {
'keystone': {
'path': '/', 'expect': 300, 'name': 'keystone-public-api'},
@ -67,7 +67,8 @@ class APICheckPlugin(openstack.CollectdPlugin):
for service in catalog:
name = service['name']
if name not in self.CHECK_MAP:
self.logger.notice("No check found for service '%s', skipping it" % name)
self.logger.notice(
"No check found for service '%s', skipping it" % name)
status = self.UNKNOWN
else:
check = self.CHECK_MAP[name]

View File

@ -30,6 +30,7 @@ METRICS = ['number_of_nodes', 'active_primary_shards', 'active_primary_shards',
HEALTH_ON_ERROR = {'type_instance': 'health', 'values': HEALTH_MAP['red']}
class ElasticsearchClusterHealthPlugin(base.Base):
def __init__(self, *args, **kwargs):
super(ElasticsearchClusterHealthPlugin, self).__init__(*args, **kwargs)

View File

@ -22,8 +22,7 @@ INTERVAL = openstack.INTERVAL
class HypervisorStatsPlugin(openstack.CollectdPlugin):
""" Class to report the statistics on Nova hypervisors.
"""
""" Class to report the statistics on Nova hypervisors."""
VALUE_MAP = {
'current_workload': 'running_tasks',
'running_vms': 'running_instances',

View File

@ -21,7 +21,8 @@ import requests
NAME = 'influxdb'
METRICS_BY_NAME = {
'cluster': {
'writeShardPointsReq': ('cluster_write_shard_points_requests', 'gauge'),
'writeShardPointsReq': ('cluster_write_shard_points_requests',
'gauge'),
'writeShardReq': ('cluster_write_shard_requests', 'gauge')},
'httpd': {

View File

@ -49,8 +49,10 @@ class OSClient(object):
# but not on timeout and backoff time is not supported.
# (at this time we ship requests 2.2.1 and urllib3 1.6.1 or 1.7.1)
self.session = requests.Session()
self.session.mount('http://', requests.adapters.HTTPAdapter(max_retries=max_retries))
self.session.mount('https://', requests.adapters.HTTPAdapter(max_retries=max_retries))
self.session.mount(
'http://', requests.adapters.HTTPAdapter(max_retries=max_retries))
self.session.mount(
'https://', requests.adapters.HTTPAdapter(max_retries=max_retries))
self.get_token()
@ -72,10 +74,9 @@ class OSClient(object):
{
'username': self.username,
'password': self.password
}
}
}
)
})
self.logger.info("Trying to get token from '%s'" % self.keystone_url)
r = self.make_request('post',
'%s/tokens' % self.keystone_url, data=data,

View File

@ -39,7 +39,7 @@ class CinderStatsPlugin(openstack.CollectdPlugin):
return d.get('status', 'unknown').lower()
def count_size_bytes(d):
return d.get('size', 0) * 10**9
return d.get('size', 0) * 10 ** 9
status = self.count_objects_group_by(volumes_details,
group_by_func=groupby)

View File

@ -92,7 +92,7 @@ class RabbitMqPlugin(base.Base):
mem_str = re.findall('{memory,\s+\[([^\]]+)\]\}', out)
# We are only interested by the total of memory used
# TODO: Get all information about memory usage from mem_str
# TODO(all): Get all information about memory usage from mem_str
try:
stats['used_memory'] = int(re.findall('total,([0-9]+)',
mem_str[0])[0])
@ -101,9 +101,11 @@ class RabbitMqPlugin(base.Base):
self.rabbitmqctl_bin)
if 'vm_memory_limit' in stats and 'used_memory' in stats:
stats['remaining_memory'] = stats['vm_memory_limit'] - stats['used_memory']
stats['remaining_memory'] = \
stats['vm_memory_limit'] - stats['used_memory']
if 'disk_free' in stats and 'disk_free_limit' in stats:
stats['remaining_disk'] = stats['disk_free'] - stats['disk_free_limit']
stats['remaining_disk'] = \
stats['disk_free'] - stats['disk_free_limit']
out, err = self.execute([self.rabbitmqctl_bin, '-q', 'cluster_status'],
shell=False)
@ -112,7 +114,7 @@ class RabbitMqPlugin(base.Base):
self.rabbitmqctl_bin)
return
# TODO: Need to be modified in case we are using RAM nodes.
# TODO(all): Need to be modified in case we are using RAM nodes.
status = CLUSTER_STATUS.findall(out)
if len(status) == 0:
self.logger.error('%s: Failed to parse (%s)' %

View File

@ -1,2 +1,4 @@
Sphinx
-e git+https://github.com/openstack/fuel-plugins.git#egg=fuel-plugin-builder
# Hacking already pins down pep8, pyflakes and flake8
hacking<0.11,>=0.10.0

14
tox.ini
View File

@ -1,5 +1,5 @@
[tox]
envlist = manifests,heka,lma_collector,docs,qa_docs,build_plugin
envlist = manifests,heka,lma_collector,docs,qa_docs,build_plugin,collectd_python
skipsdist = True
[testenv]
@ -39,6 +39,18 @@ commands =
bundle install --path {toxinidir}/.bundled_gems
bundle exec rake test
[flake8]
ignore = H105,H201,E241,H401
exclude = haproxy.py
show-source = True
[testenv:collectd_python]
changedir = {toxinidir}/deployment_scripts/puppet/modules/lma_collector/files/collectd
whitelist_externals =
flake8
commands =
flake8 .
[testenv:docs]
changedir = {toxinidir}/doc
whitelist_externals = make