Convert all stacklight configs to nested

Change-Id: Ie12b0fdcc890c71893826f52560b5ab5117ca132
This commit is contained in:
Yuriy Taraday 2016-10-07 18:25:30 +03:00
parent 3f8ea9410b
commit bcb332548d
17 changed files with 57 additions and 49 deletions

View File

@ -1,8 +1,8 @@
service: service:
name: elasticsearch name: elasticsearch
ports: ports:
- {{ elasticsearch_port }} - {{ elasticsearch.port }}
- {{ elasticsearch_cluster_port }} - {{ elasticsearch.cluster_port }}
containers: containers:
- name: elasticsearch - name: elasticsearch
image: elasticsearch image: elasticsearch

View File

@ -1,12 +1,12 @@
{{ cron_rotate_interval }} {{ cron.rotate.interval }}
rotate {{ cron_rotate_days }} rotate {{ cron.rotate.days }}
copytruncate copytruncate
compress compress
delaycompress delaycompress
notifempty notifempty
missingok missingok
minsize {{ cron_rotate_minsize }} minsize {{ cron.rotate.minsize }}
maxsize {{ cron_rotate_maxsize }} maxsize {{ cron.rotate.maxsize }}
include /etc/logrotate.d include /etc/logrotate.d

View File

@ -5,7 +5,7 @@ actions:
1: 1:
action: delete_indices action: delete_indices
description: >- description: >-
Delete indices older than {{ elasticsearch_retention_period }} days Delete indices older than {{ elasticsearch.retention_period }} days
(based on index name), for 'log-' prefixed indices. Ignore the error (based on index name), for 'log-' prefixed indices. Ignore the error
if the filter does not result in an actionable list of indices if the filter does not result in an actionable list of indices
(ignore_empty_list) and exit cleanly. (ignore_empty_list) and exit cleanly.
@ -24,6 +24,6 @@ actions:
direction: older direction: older
timestring: '%Y.%m.%d' timestring: '%Y.%m.%d'
unit: days unit: days
unit_count: {{ elasticsearch_retention_period }} unit_count: {{ elasticsearch.retention_period }}
exclude: exclude:

View File

@ -4,7 +4,7 @@
client: client:
hosts: hosts:
- localhost - localhost
port: {{ elasticsearch_port }} port: {{ elasticsearch.port }}
url_prefix: url_prefix:
use_ssl: False use_ssl: False
certificate: certificate:

View File

@ -1,25 +1,33 @@
configs: configs:
elasticsearch_port: 9200 elasticsearch:
elasticsearch_cluster_port: 9300 port: 9200
elasticsearch_retention_period: 30 cluster_port: 9300
kibana_port: 5601 retention_period: 30
heka_max_procs: 2 kibana:
heka_service_pattern: "^k8s_(.-)%..*" port: 5601
heka:
max_procs: 2
service_pattern: "^k8s_(.-)%..*"
hindsight_heka_tcp_port: 5565 hindsight_heka_tcp_port: 5565
grafana_host: "grafana" grafana:
grafana_port: 3000 host: "grafana"
grafana_user: "admin" port: 3000
grafana_password: "admin" user: "admin"
influxdb_database: "ccp" password: "admin"
influxdb_host: "influxdb" influxdb:
influxdb_password: "" database: "ccp"
influxdb_port: 8086 host: "influxdb"
influxdb_user: "" password: ""
snap_log_level: 3 port: 8086
cron_rotate_interval: "daily" user: ""
cron_rotate_days: 6 snap:
cron_rotate_minsize: "1M" log_level: 3
cron_rotate_maxsize: "100M" cron:
rotate:
interval: "daily"
days: 6
minsize: "1M"
maxsize: "100M"
versions: versions:
influxdb_version: "0.13.0" influxdb_version: "0.13.0"
grafana_version: "3.0.3-1463994644" grafana_version: "3.0.3-1463994644"

View File

@ -1,23 +1,23 @@
#!/bin/bash #!/bin/bash
echo "Waiting for Grafana to come up..." echo "Waiting for Grafana to come up..."
until $(curl --fail --output /dev/null --silent http://{{ grafana_user }}:{{ grafana_password }}@localhost:{{ grafana_port }}/api/org); do until $(curl --fail --output /dev/null --silent http://{{ grafana.user }}:{{ grafana.password }}@localhost:{{ grafana.port }}/api/org); do
printf "." printf "."
sleep 2 sleep 2
done done
echo -e "Grafana is up and running.\n" echo -e "Grafana is up and running.\n"
echo "Creating InfluxDB datasource..." echo "Creating InfluxDB datasource..."
curl -i -XPOST -H "Accept: application/json" -H "Content-Type: application/json" "http://{{ grafana_user }}:{{ grafana_password }}@localhost:{{ grafana_port }}/api/datasources" -d ' curl -i -XPOST -H "Accept: application/json" -H "Content-Type: application/json" "http://{{ grafana.user }}:{{ grafana.password }}@localhost:{{ grafana.port }}/api/datasources" -d '
{ {
"name": "CCP InfluxDB", "name": "CCP InfluxDB",
"type": "influxdb", "type": "influxdb",
"access": "proxy", "access": "proxy",
"isDefault": true, "isDefault": true,
"url": "'"http://{{ influxdb_host }}:{{ influxdb_port }}"'", "url": "'"http://{{ influxdb.host }}:{{ influxdb.port }}"'",
"password": "'"{{ influxdb_password }}"'", "password": "'"{{ influxdb.password }}"'",
"user": "'"{{ influxdb_user }}"'", "user": "'"{{ influxdb.user }}"'",
"database": "'"{{ influxdb_database }}"'" "database": "'"{{ influxdb.database }}"'"
}' }'
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
echo "Can not create InfluxDB datasource" echo "Can not create InfluxDB datasource"
@ -28,7 +28,7 @@ echo -e "InfluxDB datasource was successfully created.\n"
echo "Importing default dashboards..." echo "Importing default dashboards..."
for dashboard in /dashboards/*.json; do for dashboard in /dashboards/*.json; do
echo -e "\tImporting ${dashboard}..." echo -e "\tImporting ${dashboard}..."
curl -i -XPOST --data "@${dashboard}" -H "Accept: application/json" -H "Content-Type: application/json" "http://{{ grafana_user }}:{{ grafana_password }}@localhost:{{ grafana_port }}/api/dashboards/db" curl -i -XPOST --data "@${dashboard}" -H "Accept: application/json" -H "Content-Type: application/json" "http://{{ grafana.user }}:{{ grafana.password }}@localhost:{{ grafana.port }}/api/dashboards/db"
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
echo "Error importing ${dashboard}" echo "Error importing ${dashboard}"
exit 1 exit 1

View File

@ -6,7 +6,7 @@ fields = ["Timestamp", "Type", "Logger", "Severity", "Payload", "Pid", "Hostname
[elasticsearch_output] [elasticsearch_output]
type = "ElasticSearchOutput" type = "ElasticSearchOutput"
server = "http://elasticsearch:{{ elasticsearch_port }}" server = "http://elasticsearch:{{ elasticsearch.port }}"
message_matcher = "Type == 'log'" message_matcher = "Type == 'log'"
encoder = "elasticsearch_json_encoder" encoder = "elasticsearch_json_encoder"
use_buffering = true use_buffering = true

View File

@ -1,5 +1,5 @@
[hekad] [hekad]
maxprocs = {{ heka_max_procs }} maxprocs = {{ heka.max_procs }}
[debug_output] [debug_output]
type = "LogOutput" type = "LogOutput"

View File

@ -3,4 +3,4 @@ type = "SandboxDecoder"
filename = "lua_decoders/os_openstack_log.lua" filename = "lua_decoders/os_openstack_log.lua"
[openstack_log_decoder.config] [openstack_log_decoder.config]
heka_service_pattern = "{{ heka_service_pattern }}" heka_service_pattern = "{{ heka.service_pattern }}"

View File

@ -3,4 +3,4 @@ type = "SandboxDecoder"
filename = "lua_decoders/os_ovs.lua" filename = "lua_decoders/os_ovs.lua"
[ovs_log_decoder.config] [ovs_log_decoder.config]
heka_service_pattern = "{{ heka_service_pattern }}" heka_service_pattern = "{{ heka.service_pattern }}"

View File

@ -1,7 +1,7 @@
filename = "influxdb_tcp.lua" filename = "influxdb_tcp.lua"
host = "influxdb" host = "influxdb"
port = {{ influxdb_port }} port = {{ influxdb.port }}
database = "{{ influxdb_database }}" database = "{{ influxdb.database }}"
batch_max_lines = 3000 batch_max_lines = 3000
message_matcher = "TRUE" message_matcher = "TRUE"
ticker_interval = 10 ticker_interval = 10

View File

@ -13,5 +13,5 @@ reporting-disabled = true
[http] [http]
auth-enabled = false # FIXME(elemoine) auth-enabled = false # FIXME(elemoine)
bind-address = "{{ network_topology["private"]["address"] }}:{{ influxdb_port }}" bind-address = "{{ network_topology["private"]["address"] }}:{{ influxdb.port }}"
log-enabled = false log-enabled = false

View File

@ -1,11 +1,11 @@
# Kibana is served by a back end server. This controls which port to use. # Kibana is served by a back end server. This controls which port to use.
port: {{ kibana_port }} port: {{ kibana.port }}
# The host to bind the server to. # The host to bind the server to.
host: "{{ network_topology["private"]["address"] }}" host: "{{ network_topology["private"]["address"] }}"
# The Elasticsearch instance to use for all your queries. # The Elasticsearch instance to use for all your queries.
elasticsearch_url: "http://elasticsearch:{{ elasticsearch_port }}" elasticsearch_url: "http://elasticsearch:{{ elasticsearch.port }}"
# preserve_elasticsearch_host true will send the hostname specified in `elasticsearch`. If you set it to false, # preserve_elasticsearch_host true will send the hostname specified in `elasticsearch`. If you set it to false,
# then the host you use to connect to *this* Kibana instance will be sent. # then the host you use to connect to *this* Kibana instance will be sent.

View File

@ -1,4 +1,4 @@
log_level: {{ snap_log_level }} log_level: {{ snap.log_level }}
control: control:
plugin_load_timeout: 15 plugin_load_timeout: 15
plugin_trust_level: 0 plugin_trust_level: 0

View File

@ -1,7 +1,7 @@
service: service:
name: grafana name: grafana
ports: ports:
- {{ grafana_port }} - {{ grafana.port }}
containers: containers:
- name: grafana - name: grafana
image: grafana image: grafana

View File

@ -1,7 +1,7 @@
service: service:
name: influxdb name: influxdb
ports: ports:
- {{ influxdb_port }} - {{ influxdb.port }}
containers: containers:
- name: influxdb - name: influxdb
image: influxdb image: influxdb

View File

@ -1,7 +1,7 @@
service: service:
name: kibana name: kibana
ports: ports:
- {{ kibana_port }} - {{ kibana.port }}
containers: containers:
- name: kibana - name: kibana
image: kibana image: kibana