Use statsd pipeline

With a lot of volumes, stats tend to go missing as there's a big flood of
individual packets.  Use the statsd pipeline which is meant for this
sort of batch reporting.

Test-case is updated to handle the batched messages, which are
delimited by newlines.
This commit is contained in:
Ian Wienand 2018-06-05 09:58:52 +10:00
parent 9d29fbb8b7
commit 26a7a2c827
2 changed files with 21 additions and 10 deletions

View File

@@ -53,29 +53,35 @@ class AFSMonCmd(object):
statsd_args['port']))
self.statsd = statsd.StatsClient(**statsd_args)
# With a lot of volumes, we can flood out a lot of stats
# quickly. Use a pipeline to batch.
pipe = self.statsd.pipeline()
for f in self.fileservers:
if f.status != afsmon.FileServerStatus.NORMAL:
continue
hn = f.hostname.replace('.', '_')
self.statsd.gauge('afs.%s.idle_threads' % hn, f.idle_threads)
self.statsd.gauge('afs.%s.calls_waiting' % hn, f.calls_waiting)
pipe.gauge('afs.%s.idle_threads' % hn, f.idle_threads)
pipe.gauge('afs.%s.calls_waiting' % hn, f.calls_waiting)
for p in f.partitions:
self.statsd.gauge(
pipe.gauge(
'afs.%s.part.%s.used' % (hn, p.partition), p.used)
self.statsd.gauge(
pipe.gauge(
'afs.%s.part.%s.free' % (hn, p.partition), p.free)
self.statsd.gauge(
pipe.gauge(
'afs.%s.part.%s.total' % (hn, p.partition), p.total)
for v in f.volumes:
if v.perms != 'RW':
continue
vn = v.volume.replace('.', '_')
self.statsd.gauge(
pipe.gauge(
'afs.%s.vol.%s.used' % (hn, vn), v.used)
self.statsd.gauge(
pipe.gauge(
'afs.%s.vol.%s.quota' % (hn, vn), v.quota)
pipe.send()
def main(self, args=None):
if args is None:
args = sys.argv[1:]

View File

@@ -14,6 +14,7 @@
# under the License.
import fixtures
import itertools
import logging
import os
import select
@@ -119,9 +120,13 @@ class TestCase(testtools.TestCase):
while time.time() < (start + 5):
# Note our fake statsd just queues up results in a queue.
# We just keep going through them until we find one that
# matches, or fail out.
for stat in self.statsd.stats:
k, v = stat.decode('utf-8').split(':')
# matches, or fail out. If a statsd pipeline is used, the
# elements are separated by newlines, so flatten out all
# the stats first.
stats = itertools.chain.from_iterable(
[s.decode('utf-8').split('\n') for s in self.statsd.stats])
for stat in stats:
k, v = stat.split(':')
if key == k:
if kind is None:
# key with no qualifiers is found