Convert all stacklight configs to nested

Change-Id: Ie12b0fdcc890c71893826f52560b5ab5117ca132
Yuriy Taraday 2016-10-07 18:25:30 +03:00
parent 3f8ea9410b
commit bcb332548d
17 changed files with 57 additions and 49 deletions
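
Every hunk below makes the same mechanical change: a flat template variable such as elasticsearch_port becomes a nested lookup such as elasticsearch.port, with the corresponding nesting introduced in the configs file. A minimal sketch of what the rename means for rendering, assuming Jinja2-style templating (the {{ ... }} syntax these templates use); this is illustrative, not the project's actual rendering code:

    # Illustrative only: flat vs. nested variable lookup in Jinja2.
    import jinja2

    env = jinja2.Environment(undefined=jinja2.StrictUndefined)

    # Before: one top-level variable per option.
    flat = {"elasticsearch_port": 9200, "elasticsearch_cluster_port": 9300}
    print(env.from_string("- {{ elasticsearch_port }}").render(flat))

    # After: one top-level key per component; options hang off it, so
    # templates use dotted access (Jinja2 falls back to dict lookup).
    nested = {"elasticsearch": {"port": 9200, "cluster_port": 9300}}
    print(env.from_string("- {{ elasticsearch.port }}").render(nested))

Both snippets print "- 9200"; only the shape of the configuration and the lookup syntax change.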

View File

@@ -1,8 +1,8 @@
 service:
   name: elasticsearch
   ports:
-    - {{ elasticsearch_port }}
-    - {{ elasticsearch_cluster_port }}
+    - {{ elasticsearch.port }}
+    - {{ elasticsearch.cluster_port }}
   containers:
     - name: elasticsearch
       image: elasticsearch

View File

@@ -1,12 +1,12 @@
-{{ cron_rotate_interval }}
-rotate {{ cron_rotate_days }}
+{{ cron.rotate.interval }}
+rotate {{ cron.rotate.days }}
 copytruncate
 compress
 delaycompress
 notifempty
 missingok
-minsize {{ cron_rotate_minsize }}
-maxsize {{ cron_rotate_maxsize }}
+minsize {{ cron.rotate.minsize }}
+maxsize {{ cron.rotate.maxsize }}
 include /etc/logrotate.d

View File

@@ -5,7 +5,7 @@ actions:
   1:
     action: delete_indices
    description: >-
-      Delete indices older than {{ elasticsearch_retention_period }} days
+      Delete indices older than {{ elasticsearch.retention_period }} days
       (based on index name), for 'log-' prefixed indices. Ignore the error
       if the filter does not result in an actionable list of indices
       (ignore_empty_list) and exit cleanly.
@@ -24,6 +24,6 @@ actions:
       direction: older
       timestring: '%Y.%m.%d'
       unit: days
-      unit_count: {{ elasticsearch_retention_period }}
+      unit_count: {{ elasticsearch.retention_period }}
       exclude:

View File

@@ -4,7 +4,7 @@
 client:
   hosts:
     - localhost
-  port: {{ elasticsearch_port }}
+  port: {{ elasticsearch.port }}
   url_prefix:
   use_ssl: False
   certificate:

View File

@@ -1,25 +1,33 @@
 configs:
-  elasticsearch_port: 9200
-  elasticsearch_cluster_port: 9300
-  elasticsearch_retention_period: 30
-  kibana_port: 5601
-  heka_max_procs: 2
-  heka_service_pattern: "^k8s_(.-)%..*"
+  elasticsearch:
+    port: 9200
+    cluster_port: 9300
+    retention_period: 30
+  kibana:
+    port: 5601
+  heka:
+    max_procs: 2
+    service_pattern: "^k8s_(.-)%..*"
   hindsight_heka_tcp_port: 5565
-  grafana_host: "grafana"
-  grafana_port: 3000
-  grafana_user: "admin"
-  grafana_password: "admin"
-  influxdb_database: "ccp"
-  influxdb_host: "influxdb"
-  influxdb_password: ""
-  influxdb_port: 8086
-  influxdb_user: ""
-  snap_log_level: 3
-  cron_rotate_interval: "daily"
-  cron_rotate_days: 6
-  cron_rotate_minsize: "1M"
-  cron_rotate_maxsize: "100M"
+  grafana:
+    host: "grafana"
+    port: 3000
+    user: "admin"
+    password: "admin"
+  influxdb:
+    database: "ccp"
+    host: "influxdb"
+    password: ""
+    port: 8086
+    user: ""
+  snap:
+    log_level: 3
+  cron:
+    rotate:
+      interval: "daily"
+      days: 6
+      minsize: "1M"
+      maxsize: "100M"
 versions:
   influxdb_version: "0.13.0"
   grafana_version: "3.0.3-1463994644"
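
How the nested block above parses, assuming standard YAML semantics (PyYAML here; the service's own config loader may differ) — flat names become paths into nested mappings:

    # Illustrative only: nested config keys become nested dict lookups.
    import yaml

    cfg = yaml.safe_load("""
    configs:
      elasticsearch:
        port: 9200
      cron:
        rotate:
          interval: "daily"
    """)["configs"]
    assert cfg["elasticsearch"]["port"] == 9200
    assert cfg["cron"]["rotate"]["interval"] == "daily"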

View File

@@ -1,23 +1,23 @@
 #!/bin/bash
 echo "Waiting for Grafana to come up..."
-until $(curl --fail --output /dev/null --silent http://{{ grafana_user }}:{{ grafana_password }}@localhost:{{ grafana_port }}/api/org); do
+until $(curl --fail --output /dev/null --silent http://{{ grafana.user }}:{{ grafana.password }}@localhost:{{ grafana.port }}/api/org); do
     printf "."
     sleep 2
 done
 echo -e "Grafana is up and running.\n"
 echo "Creating InfluxDB datasource..."
-curl -i -XPOST -H "Accept: application/json" -H "Content-Type: application/json" "http://{{ grafana_user }}:{{ grafana_password }}@localhost:{{ grafana_port }}/api/datasources" -d '
+curl -i -XPOST -H "Accept: application/json" -H "Content-Type: application/json" "http://{{ grafana.user }}:{{ grafana.password }}@localhost:{{ grafana.port }}/api/datasources" -d '
 {
   "name": "CCP InfluxDB",
   "type": "influxdb",
   "access": "proxy",
   "isDefault": true,
-  "url": "'"http://{{ influxdb_host }}:{{ influxdb_port }}"'",
-  "password": "'"{{ influxdb_password }}"'",
-  "user": "'"{{ influxdb_user }}"'",
-  "database": "'"{{ influxdb_database }}"'"
+  "url": "'"http://{{ influxdb.host }}:{{ influxdb.port }}"'",
+  "password": "'"{{ influxdb.password }}"'",
+  "user": "'"{{ influxdb.user }}"'",
+  "database": "'"{{ influxdb.database }}"'"
 }'
 if [ $? -ne 0 ]; then
     echo "Can not create InfluxDB datasource"
@@ -28,7 +28,7 @@ echo -e "InfluxDB datasource was successfully created.\n"
 echo "Importing default dashboards..."
 for dashboard in /dashboards/*.json; do
     echo -e "\tImporting ${dashboard}..."
-    curl -i -XPOST --data "@${dashboard}" -H "Accept: application/json" -H "Content-Type: application/json" "http://{{ grafana_user }}:{{ grafana_password}}@localhost:{{ grafana_port }}/api/dashboards/db"
+    curl -i -XPOST --data "@${dashboard}" -H "Accept: application/json" -H "Content-Type: application/json" "http://{{ grafana.user }}:{{ grafana.password }}@localhost:{{ grafana.port }}/api/dashboards/db"
     if [ $? -ne 0 ]; then
         echo "Error importing ${dashboard}"
         exit 1

View File

@@ -6,7 +6,7 @@ fields = ["Timestamp", "Type", "Logger", "Severity", "Payload", "Pid", "Hostname
 
 [elasticsearch_output]
 type = "ElasticSearchOutput"
-server = "http://elasticsearch:{{ elasticsearch_port }}"
+server = "http://elasticsearch:{{ elasticsearch.port }}"
 message_matcher = "Type == 'log'"
 encoder = "elasticsearch_json_encoder"
 use_buffering = true

View File

@ -1,5 +1,5 @@
[hekad]
maxprocs = {{ heka_max_procs }}
maxprocs = {{ heka.max_procs }}
[debug_output]
type = "LogOutput"

View File

@@ -3,4 +3,4 @@ type = "SandboxDecoder"
 filename = "lua_decoders/os_openstack_log.lua"
 
 [openstack_log_decoder.config]
-heka_service_pattern = "{{ heka_service_pattern }}"
+heka_service_pattern = "{{ heka.service_pattern }}"

View File

@@ -3,4 +3,4 @@ type = "SandboxDecoder"
 filename = "lua_decoders/os_ovs.lua"
 
 [ovs_log_decoder.config]
-heka_service_pattern = "{{ heka_service_pattern }}"
+heka_service_pattern = "{{ heka.service_pattern }}"

View File

@@ -1,7 +1,7 @@
 filename = "influxdb_tcp.lua"
 host = "influxdb"
-port = {{ influxdb_port }}
-database = "{{ influxdb_database }}"
+port = {{ influxdb.port }}
+database = "{{ influxdb.database }}"
 batch_max_lines = 3000
 message_matcher = "TRUE"
 ticker_interval = 10

View File

@@ -13,5 +13,5 @@ reporting-disabled = true
 
 [http]
 auth-enabled = false # FIXME(elemoine)
-bind-address = "{{ network_topology["private"]["address"] }}:{{ influxdb_port }}"
+bind-address = "{{ network_topology["private"]["address"] }}:{{ influxdb.port }}"
 log-enabled = false

View File

@@ -1,11 +1,11 @@
 # Kibana is served by a back end server. This controls which port to use.
-port: {{ kibana_port }}
+port: {{ kibana.port }}
 
 # The host to bind the server to.
 host: "{{ network_topology["private"]["address"] }}"
 
 # The Elasticsearch instance to use for all your queries.
-elasticsearch_url: "http://elasticsearch:{{ elasticsearch_port }}"
+elasticsearch_url: "http://elasticsearch:{{ elasticsearch.port }}"
 
 # preserve_elasticsearch_host true will send the hostname specified in `elasticsearch`. If you set it to false,
 # then the host you use to connect to *this* Kibana instance will be sent.

View File

@@ -1,4 +1,4 @@
-log_level: {{ snap_log_level }}
+log_level: {{ snap.log_level }}
 control:
   plugin_load_timeout: 15
   plugin_trust_level: 0

View File

@@ -1,7 +1,7 @@
 service:
   name: grafana
   ports:
-    - {{ grafana_port }}
+    - {{ grafana.port }}
   containers:
     - name: grafana
       image: grafana

View File

@@ -1,7 +1,7 @@
 service:
   name: influxdb
   ports:
-    - {{ influxdb_port }}
+    - {{ influxdb.port }}
   containers:
     - name: influxdb
       image: influxdb

View File

@@ -1,7 +1,7 @@
 service:
   name: kibana
   ports:
-    - {{ kibana_port }}
+    - {{ kibana.port }}
   containers:
     - name: kibana
       image: kibana