Only flush staging tables if enough time has elapsed since last flush.

This commit is contained in:
Deklan Dieterly 2014-03-10 09:01:19 -06:00
parent 9cf71380e1
commit a6ecbc35d1
5 changed files with 57 additions and 34 deletions

View File

@ -1,7 +1,6 @@
package com.hpcloud.dedupe;
import com.google.inject.Inject;
import com.hpcloud.configuration.MonPersisterConfiguration;
import com.hpcloud.disruptor.event.MetricMessageEvent;
import com.lmax.disruptor.EventTranslator;
import com.lmax.disruptor.dsl.Disruptor;
@ -13,16 +12,13 @@ public class MonDeDuperHeartbeat implements Managed {
private static Logger logger = LoggerFactory.getLogger(MonDeDuperHeartbeat.class);
private final MonPersisterConfiguration configuration;
private final Disruptor disruptor;
private final DeDuperRunnable deDuperRunnable;
@Inject
public MonDeDuperHeartbeat(MonPersisterConfiguration configuration,
Disruptor disruptor) {
this.configuration = configuration;
public MonDeDuperHeartbeat(Disruptor disruptor) {
this.disruptor = disruptor;
this.deDuperRunnable = new DeDuperRunnable(configuration, disruptor);
this.deDuperRunnable = new DeDuperRunnable(disruptor);
}
@ -41,24 +37,22 @@ public class MonDeDuperHeartbeat implements Managed {
private static Logger logger = LoggerFactory.getLogger(DeDuperRunnable.class);
private final MonPersisterConfiguration configuration;
private final Disruptor disruptor;
private DeDuperRunnable(MonPersisterConfiguration configuration, Disruptor disruptor) {
this.configuration = configuration;
private DeDuperRunnable(Disruptor disruptor) {
this.disruptor = disruptor;
}
@Override
public void run() {
int seconds = configuration.getMonDeDuperConfiguration().getDedupeRunFrequencySeconds();
for (; ; ) {
try {
Thread.sleep(seconds * 1000);
logger.debug("Waking up after sleeping " + seconds + " seconds, yawn...");
// Send a heartbeat every second.
Thread.sleep(1000);
logger.debug("Waking up after sleeping 1 second, yawn...");
// Send heartbeat
logger.debug("Sending dedupe heartbeat message");
logger.debug("Sending heartbeat message");
disruptor.publishEvent(new EventTranslator<MetricMessageEvent>() {
@Override
@ -69,7 +63,7 @@ public class MonDeDuperHeartbeat implements Managed {
});
} catch (Exception e) {
logger.error("Failed to send dedupe heartbeat", e);
logger.error("Failed to send heartbeat", e);
}
}

View File

@ -2,6 +2,7 @@ package com.hpcloud.disruptor.event;
import com.google.inject.Inject;
import com.google.inject.assistedinject.Assisted;
import com.hpcloud.configuration.MonPersisterConfiguration;
import com.hpcloud.message.MetricMessage;
import com.hpcloud.repository.VerticaMetricRepository;
import com.lmax.disruptor.EventHandler;
@ -31,21 +32,32 @@ public class MetricMessageEventHandler implements EventHandler<MetricMessageEven
private final SimpleDateFormat simpleDateFormat;
private long millisSinceLastFlush = System.currentTimeMillis();
private final long millisBetweenFlushes;
private final int secondsBetweenFlushes;
private final VerticaMetricRepository verticaMetricRepository;
private final MonPersisterConfiguration configuration;
private final Counter metricCounter = Metrics.newCounter(this.getClass(), "metrics-added-to-batch-counter");
private final Counter definitionCounter = Metrics.newCounter(this.getClass(), "metric-definitions-added-to-batch-counter");
private final Counter dimensionCounter = Metrics.newCounter(this.getClass(), "metric-dimensions-added-to-batch-counter");
private final Meter metricMessageMeter = Metrics.newMeter(this.getClass(), "Metric", "metrics-messages-processed-meter", TimeUnit.SECONDS);
private final Meter commitMeter = Metrics.newMeter(this.getClass(), "Metric", "commits-executed-meter", TimeUnit.SECONDS);
private final Timer commitTimer = Metrics.newTimer(this.getClass(), "commits-executed-timer");
private final Timer commitTimer = Metrics.newTimer(this.getClass(), "total-commit-and-flush-timer");
@Inject
public MetricMessageEventHandler(VerticaMetricRepository verticaMetricRepository,
MonPersisterConfiguration configuration,
@Assisted("ordinal") int ordinal,
@Assisted("numProcessors") int numProcessors,
@Assisted("batchSize") int batchSize) {
this.verticaMetricRepository = verticaMetricRepository;
this.configuration = configuration;
this.secondsBetweenFlushes = configuration.getMonDeDuperConfiguration().getDedupeRunFrequencySeconds();
this.millisBetweenFlushes = secondsBetweenFlushes * 1000;
this.ordinal = ordinal;
this.numProcessors = numProcessors;
this.batchSize = batchSize;
@ -53,14 +65,20 @@ public class MetricMessageEventHandler implements EventHandler<MetricMessageEven
simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
simpleDateFormat.setTimeZone(TimeZone.getTimeZone("GMT-0"));
}
@Override
public void onEvent(MetricMessageEvent metricMessageEvent, long sequence, boolean b) throws Exception {
if (metricMessageEvent.getMetricEnvelope() == null) {
logger.debug("Received heartbeat message. Flushing staging tables.");
verticaMetricRepository.flush();
logger.debug("Received heartbeat message. Checking last flush time.");
if (millisSinceLastFlush + millisBetweenFlushes < System.currentTimeMillis()) {
logger.debug("It's been more than " + secondsBetweenFlushes + " seconds since last flush. Flushing staging tables now...");
flush();
} else {
logger.debug("It has not been more than " + secondsBetweenFlushes + " seconds since last flush. No need to perform flush at this time.");
}
return;
}
@ -124,11 +142,15 @@ public class MetricMessageEventHandler implements EventHandler<MetricMessageEven
}
if (sequence % batchSize == (batchSize - 1)) {
TimerContext context = commitTimer.time();
verticaMetricRepository.flush();
flush();
context.stop();
commitMeter.mark();
}
}
/**
 * Flushes the Vertica staging tables and records the wall-clock time of
 * this flush, so the heartbeat handler can rate-limit subsequent flushes.
 */
private void flush() {
    verticaMetricRepository.flush();
    long now = System.currentTimeMillis();
    // NOTE(review): despite its name, this field holds a timestamp (epoch
    // millis of the last flush), not an elapsed duration.
    millisSinceLastFlush = now;
}
}

View File

@ -4,11 +4,6 @@ import com.google.common.base.Preconditions;
import java.util.Map;
/**
* A metric envelope.
*
* @author Jonathan Halterman
*/
public class MetricEnvelope {
public MetricMessage metric;
public Map<String, Object> meta;
@ -27,4 +22,12 @@ public class MetricEnvelope {
this.metric = metric;
this.meta = meta;
}
@Override
public String toString() {
    // Human-readable dump of the envelope: payload metric plus its metadata map.
    // %s renders null fields as "null", matching plain string concatenation.
    return String.format("MetricEnvelope{metric=%s, meta=%s}", metric, meta);
}
}

View File

@ -1,5 +1,8 @@
package com.hpcloud.repository;
import com.yammer.metrics.Metrics;
import com.yammer.metrics.core.Timer;
import com.yammer.metrics.core.TimerContext;
import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.PreparedBatch;
import org.slf4j.Logger;
@ -16,12 +19,6 @@ public class VerticaMetricRepository extends VerticaRepository {
private static final String SQL_INSERT_INTO_METRICS =
"insert into MonMetrics.metrics (metric_definition_id, time_stamp, value) values (:metric_definition_id, :time_stamp, :value)";
private static final String SQL_INSERT_INTO_STAGING_DEFINITIONS =
"insert into MonMetrics.stagedDefinitions values (:metric_definition_id, :name, :tenant_id," +
":region)";
private static final String SQL_INSERT_INTO_STAGING_DIMENSIONS =
"insert into MonMetrics.stagedDimensions values (:metric_definition_id, :name, :value)";
private static final String defs = "(" +
" metric_definition_id BINARY(20) NOT NULL," +
" name VARCHAR NOT NULL," +
@ -46,6 +43,9 @@ public class VerticaMetricRepository extends VerticaRepository {
private final String dsDefs;
private final String dsDims;
private final Timer commitTimer = Metrics.newTimer(this.getClass(), "commits-timer");
private final Timer flushTimer = Metrics.newTimer(this.getClass(), "staging-tables-flushed-timer");
@Inject
public VerticaMetricRepository(DBI dbi) throws NoSuchAlgorithmException, SQLException {
super(dbi);
@ -93,12 +93,14 @@ public class VerticaMetricRepository extends VerticaRepository {
public void flush() {
commitBatch();
long startTime = System.currentTimeMillis();
TimerContext context = flushTimer.time();
handle.execute(dsDefs);
handle.execute("truncate table " + sDefs);
handle.execute(dsDims);
handle.execute("truncate table " + sDims);
handle.commit();
handle.begin();
context.stop();
long endTime = System.currentTimeMillis();
logger.debug("Flushing staging tables took " + (endTime - startTime) / 1000 + " seconds");
@ -106,11 +108,13 @@ public class VerticaMetricRepository extends VerticaRepository {
/**
 * Executes the three staged prepared batches (metrics, definitions,
 * dimensions), commits them in the current transaction, and immediately
 * opens a new transaction for the next batch. Timed via {@code commitTimer}.
 */
private void commitBatch() {
    long startTime = System.currentTimeMillis();
    TimerContext context = commitTimer.time();
    metricsBatch.execute();
    stagedDefinitionsBatch.execute();
    stagedDimensionsBatch.execute();
    handle.commit();
    // Re-open a transaction so subsequent batch adds run inside one.
    handle.begin();
    context.stop();
    long endTime = System.currentTimeMillis();
    // Log millis directly: the old "(endTime - startTime) / 1000" integer
    // division truncated every sub-second commit to "0 seconds".
    logger.debug("Committing batch took " + (endTime - startTime) + " millis");
}

View File

@ -28,7 +28,7 @@ kafkaConfiguration:
disruptorConfiguration:
bufferSize: 1048576
numProcessors: 2
numProcessors: 1
verticaOutputProcessorConfiguration:
batchSize: 25000
@ -41,8 +41,8 @@ databaseConfiguration:
# url: jdbc:vertica://mon-aw1rdd1-vertica0001.rndd.aw1.hpcloud.net:5433/som
url: jdbc:vertica://15.185.94.245:5433/som
# user: persister
# user: mon_persister
user: dbadmin
user: mon_persister
# user: dbadmin
password: password
properties:
ssl: false
@ -81,8 +81,8 @@ logging:
# Sets the level for 'com.example.app' to DEBUG.
com.example.app: DEBUG
com.hpcloud: debug
com.hpcloud.repository: DEBUG
com.hpcloud.disruptor.event: INFO
# Settings for logging to stdout.
console: