removed temp files

This commit is contained in:
Derrick Johnson 2014-04-18 00:06:28 -04:00
parent f8f2748142
commit 42a8f62fbb
2 changed files with 0 additions and 193 deletions

View File

@ -1,104 +0,0 @@
# NOTE(review): indentation/nesting appears to have been flattened when this
# file was extracted from the diff view — restore the original YAML nesting
# (e.g. hpcs.compute under cloudServices) before reusing this config.
# SECURITY(review): live-looking credentials (identityService.password,
# middleware keystore/truststore passwords) are committed in plaintext here;
# rotate them and move secrets out of version control.
# Whether this server is running on a secure port
accessedViaHttps: false
# Cloud service integration information
cloudServices:
hpcs.compute:
version: 2 #1.1
# API URL format with an optional placeholder for AZ
urlFormat: https://region-b.geo-1.compute.hpcloudsvc.com/v2 # https://region-a.geo-1.compute.hpcloudsvc.com/v1.1
port: 80
hpcs.object-store:
version: 1.0
urlFormat: https://region-a.geo-1.objects.hpcloudsvc.com/v1.0 # https://region-a.geo-1.compute.hpcloudsvc.com/v1
port: 80
# Identity (Control services)
identityService:
url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/tokens #https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/tokens
username: hpcs.maas
password: c4@ng3I7maaS
tenantId: 10684178218130
# Topic for publishing metrics to
metricsTopic: metrics
# Topic for publishing domain events to
eventsTopic: events
# Kafka broker / ZooKeeper endpoints used for metric and event streams
kafka:
brokerUris:
- 192.168.10.10:9092
zookeeperUris:
- 192.168.10.10:2181
healthCheckTopic: healthcheck
# Vertica datasource (metric storage) — pool sized at a fixed 128 connections
databaseVertica:
driverClass: com.vertica.jdbc.Driver
#url: jdbc:vertica://mon-aw1rdd1-vertica0002.rndd.aw1.hpcloud.net:5433/mon
url: jdbc:vertica://192.168.10.8:5433/mon
user: mon_api
password: password
properties:
charSet: UTF-8
ssl: false
# The maximum amount of time to wait on an empty pool before throwing an exception
maxWaitForConnection: 1s
# The SQL vertica to run when validating a connection's liveness
validationQuery: "/* MyService Health Check */ SELECT 1"
initialSize: 128
# The minimum number of connections to keep open
minSize: 128
# The maximum number of connections to keep open
maxSize: 128
# Whether or not idle connections should be validated
checkConnectionWhileIdle: false
# How long a connection must be held before it can be validated
#checkConnectionHealthWhenIdleFor: 10s
# The maximum lifetime of an idle connection
#closeConnectionIfIdleFor: 1 minute
# MySQL datasource (config/alarm storage)
database:
driverClass: com.mysql.jdbc.Driver
url: jdbc:mysql://localhost:3306/mon?connectTimeout=5000&autoReconnect=true
user: root
# NOTE(review): empty root password — presumably a local dev default; verify
password:
maxWaitForConnection: 1s
validationQuery: "/* MyService Health Check */ SELECT 1"
minSize: 8
maxSize: 32
checkConnectionWhileIdle: false
jerseyClient:
gzipEnabledForRequests: false
# Keystone/HP middleware auth filter — disabled in this configuration
middleware:
enabled: false
serviceIds: 100
endpointIds: 160
serverVIP: region-a.geo-1.identity-admin.hpcloudsvc.com #15.184.9.181
serverPort: 9543
connTimeout: 500
connSSLClientAuth: true
keystore: etc/security/prod-hpmiddleware-keystore.jks
keystorePass: changeit
truststore: etc/security/prod-hpmiddleware-truststore.jks
truststorePass: changeit
connPoolMaxActive: 3
connPoolMaxIdle: 3
connPoolEvictPeriod: 600000
connPoolMinIdleTime: 600000
connRetryTimes: 2
connRetryInterval: 50
rolesToMatch: [user, domainuser, domainadmin]

View File

@ -1,89 +0,0 @@
package com.hpcloud.mon.infrastructure.persistence;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
import javax.inject.Named;
import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.Query;
import com.hpcloud.mon.common.model.metric.MetricDefinition;
import com.hpcloud.mon.domain.model.metric.MetricDefinitionRepository;
/**
* Vertica metric definition repository implementation.
*
* @author Jonathan Halterman
*/
public class MetricDefinitionRepositoryImpl implements MetricDefinitionRepository {
<<<<<<< HEAD
private static final String FIND_BY_METRIC_DEF_SQL = "select dd.id, def.name, d.name as dname, d.value as dvalue "
+ "from MonMetrics.Definitions def, MonMetrics.DefinitionDimensions dd "
+ "left outer join MonMetrics.Dimensions d on d.dimension_set_id = dd.dimension_set_id%s "
+ "where def.id = dd.definition_id and def.tenant_id = :tenantId%s order by dd.id";
=======
private static final String FIND_BY_METRIC_DEF_SQL = "select def.id, def.name, d.name as dname, d.value as dvalue "
+ "from MonMetrics.Definitions def, MonMetrics.Dimensions d%s "
+ "where def.tenant_id = :tenantId and d.definition_id = def.id%s order by def.id";
>>>>>>> updated to support Statistics
private final DBI db;
@Inject
public MetricDefinitionRepositoryImpl(@Named("vertica") DBI db) {
this.db = db;
}
@Override
public List<MetricDefinition> find(String tenantId, String name, Map<String, String> dimensions) {
Handle h = db.open();
try {
// Build sql
StringBuilder sbWhere = new StringBuilder();
if (name != null)
sbWhere.append(" and def.name = :name");
String sql = String.format(FIND_BY_METRIC_DEF_SQL,
MetricQueries.buildJoinClauseFor(dimensions), sbWhere);
// Build query
Query<Map<String, Object>> query = h.createQuery(sql).bind("tenantId", tenantId);
if (name != null)
query.bind("name", name);
MetricQueries.bindDimensionsToQuery(query, dimensions);
// Execute query
List<Map<String, Object>> rows = query.list();
// Build results
List<MetricDefinition> metricDefs = new ArrayList<>(rows.size());
byte[] currentId = null;
Map<String, String> dims = null;
for (Map<String, Object> row : rows) {
byte[] defId = (byte[]) row.get("id");
String metricName = (String) row.get("name");
String dName = (String) row.get("dname");
String dValue = (String) row.get("dvalue");
if (defId == null || !Arrays.equals(currentId, defId)) {
currentId = defId;
dims = new HashMap<>();
if (dName != null && dValue != null)
dims.put(dName, dValue);
metricDefs.add(new MetricDefinition(metricName, dims));
} else
dims.put(dName, dValue);
}
return metricDefs;
} finally {
h.close();
}
}
}