Merge remote-tracking branch 'upstream/stacklight' into merge-branch

Change-Id: I1dda55c0317a4df6ffea1fa11d521941328fc1e6
Simon Pasquier 2016-11-25 16:18:32 +01:00
commit 8b6a5c4964
6 changed files with 5972 additions and 2 deletions

10
cinder/files/collectd_openstack_cinder.conf Normal file

@@ -0,0 +1,10 @@
Import "openstack_cinder"

<Module "openstack_cinder">
  KeystoneUrl "{{ plugin.url }}"
  Username "{{ plugin.username }}"
  Password "{{ plugin.password }}"
  Tenant "{{ plugin.tenant }}"
  MaxRetries "2"
  Timeout "20"
</Module>
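
The cinder/meta/collectd.yml file further below supplies the url, username, password and tenant values that this template consumes through its plugin.* context. Rendered with hypothetical pillar data (Keystone on 10.0.0.10:35357, a cinder service user in the service tenant; every value is an example, not a default), the output would look roughly like this:

# Hypothetical rendering; host, credentials and tenant are illustrative only.
Import "openstack_cinder"

<Module "openstack_cinder">
  KeystoneUrl "http://10.0.0.10:35357/v2.0"
  Username "cinder"
  Password "secret"
  Tenant "service"
  MaxRetries "2"
  Timeout "20"
</Module>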

File diff suppressed because it is too large

19
cinder/meta/collectd.yml Normal file

@@ -0,0 +1,19 @@
{%- if pillar.cinder.controller is defined %}
{%- from "cinder/map.jinja" import controller with context %}
{%- if pillar.cinder.controller.get('enabled', False) %}
local_plugin:
  collectd_check_local_endpoint:
    endpoint:
      cinder-api:
        expected_code: 200
        url: "http://{{ controller.osapi.host|replace('0.0.0.0', '127.0.0.1') }}:8776/"
remote_plugin:
  openstack_cinder:
    plugin: python
    template: cinder/files/collectd_openstack_cinder.conf
    url: "http://{{ controller.identity.host }}:{{ controller.identity.port }}/v{% if controller.identity.get('api_version', 2)|int == 2 %}2.0{% else %}3{% endif %}"
    username: {{ controller.identity.user }}
    password: {{ controller.identity.password }}
    tenant: {{ controller.identity.tenant }}
{%- endif %}
{%- endif %}
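
These checks are only emitted when the cinder controller pillar is present and enabled. A minimal pillar excerpt that would satisfy the conditions above might look like the following sketch; the keys mirror what the template references (osapi.host, identity.*), and every value is a placeholder rather than a real default:

# Hypothetical pillar data; hosts and credentials are illustrative only.
cinder:
  controller:
    enabled: true
    osapi:
      host: 0.0.0.0
    identity:
      host: 10.0.0.10
      port: 35357
      user: cinder
      password: secret
      tenant: service
      api_version: 2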

4
cinder/meta/grafana.yml Normal file

@@ -0,0 +1,4 @@
dashboard:
  cinder:
    format: json
    template: cinder/files/grafana_dashboards/cinder_influxdb.json

285
cinder/meta/heka.yml Normal file

@@ -0,0 +1,285 @@
log_collector:
  decoder:
    cinder:
      engine: sandbox
      module_file: /usr/share/lma_collector/decoders/openstack_log.lua
      module_dir: /usr/share/lma_collector/common;/usr/share/heka/lua_modules
      adjust_timezone: true
  splitter:
    cinder:
      engine: token
      delimiter: '\n'
  input:
    cinder_log:
      engine: logstreamer
      log_directory: "/var/log"
      file_match: 'cinder/(?P<Service>.+)\.log\.?(?P<Seq>\d*)$'
      differentiator: ['cinder', '_', 'Service']
      priority: ["^Seq"]
      decoder: "cinder_decoder"
      splitter: "cinder_splitter"
metric_collector:
  trigger:
    cinder_logs_error:
      description: 'Too many errors have been detected in Cinder logs'
      severity: warning
      no_data_policy: okay
      rules:
      - metric: log_messages
        field:
          service: cinder
          level: error
        relational_operator: '>'
        threshold: 0.1
        window: 70
        periods: 0
        function: max
{%- if pillar.cinder.controller is defined %}
    cinder_api_local_endpoint:
      description: 'Cinder API is locally down'
      severity: down
      rules:
      - metric: openstack_check_local_api
        field:
          service: cinder-api
        relational_operator: '=='
        threshold: 0
        window: 60
        periods: 0
        function: last
{%- endif %}
  alarm:
{%- if pillar.cinder.controller is defined %}
    cinder_logs:
      alerting: enabled
      triggers:
      - cinder_logs_error
      dimension:
        service: cinder-logs
    cinder_api_endpoint:
      alerting: enabled
      triggers:
      - cinder_api_local_endpoint
      dimension:
        service: cinder-api-endpoint
{%- endif %}
{%- if pillar.cinder.volume is defined %}
    cinder_logs_volume:
      alerting: enabled
      triggers:
      - cinder_logs_error
      dimension:
        service: cinder-logs-volume
{%- endif %}
remote_collector:
  trigger:
{%- if pillar.cinder.controller is defined %}
    cinder_api_check_failed:
      description: 'Endpoint check for cinder-api failed'
      severity: down
      rules:
      - metric: openstack_check_api
        field:
          service: cinder-api
        relational_operator: '=='
        threshold: 0
        window: 60
        periods: 0
        function: last
    cinder_scheduler_one_down:
      description: 'At least one Cinder scheduler is down'
      severity: warning
      rules:
      - metric: openstack_cinder_services
        field:
          service: scheduler
          state: down
        relational_operator: '>'
        threshold: 0
        window: 60
        periods: 0
        function: last
    cinder_scheduler_majority_down:
      description: 'Majority of Cinder schedulers are down'
      severity: critical
      rules:
      - metric: openstack_cinder_services_percent
        field:
          service: scheduler
          state: up
        relational_operator: '<='
        threshold: 50
        window: 60
        periods: 0
        function: last
    cinder_scheduler_all_down:
      description: 'All Cinder schedulers are down'
      severity: down
      rules:
      - metric: openstack_cinder_services
        field:
          service: scheduler
          state: up
        relational_operator: '=='
        threshold: 0
        window: 60
        periods: 0
        function: last
{%- endif %}
{%- if pillar.cinder.volume is defined %}
    cinder_volume_one_down:
      description: 'At least one Cinder volume is down'
      severity: warning
      rules:
      - metric: openstack_cinder_services
        field:
          service: volume
          state: down
        relational_operator: '>'
        threshold: 0
        window: 60
        periods: 0
        function: last
    cinder_volume_majority_down:
      description: 'Majority of Cinder volumes are down'
      severity: critical
      rules:
      - metric: openstack_cinder_services_percent
        field:
          service: volume
          state: up
        relational_operator: '<='
        threshold: 50
        window: 60
        periods: 0
        function: last
    cinder_volume_all_down:
      description: 'All Cinder volumes are down'
      severity: down
      rules:
      - metric: openstack_cinder_services
        field:
          service: volume
          state: up
        relational_operator: '=='
        threshold: 0
        window: 60
        periods: 0
        function: last
{%- endif %}
  alarm:
{%- if pillar.cinder.controller is defined %}
    cinder_api_check:
      alerting: enabled
      triggers:
      - cinder_api_check_failed
      dimension:
        service: cinder-api-check
    cinder_scheduler:
      alerting: enabled
      triggers:
      - cinder_scheduler_all_down
      - cinder_scheduler_majority_down
      - cinder_scheduler_one_down
      dimension:
        service: cinder-scheduler
{%- endif %}
{%- if pillar.cinder.volume is defined %}
    cinder_volume:
      alerting: enabled
      triggers:
      - cinder_volume_all_down
      - cinder_volume_majority_down
      - cinder_volume_one_down
      dimension:
        service: cinder-volume
{%- endif %}
aggregator:
  alarm_cluster:
    cinder_logs_volume:
      policy: highest_severity
      alerting: enabled
      group_by: hostname
      match:
        service: cinder-logs-volume
      members:
      - cinder_logs_volume
      dimension:
        service: cinder-data
        nagios_host: 01-service-clusters
    cinder_logs:
      policy: highest_severity
      alerting: enabled
      group_by: hostname
      match:
        service: cinder-logs
      members:
      - cinder_logs
      dimension:
        service: cinder-control
        nagios_host: 01-service-clusters
    cinder_api_endpoint:
      policy: availability_of_members
      alerting: enabled
      group_by: hostname
      match:
        service: cinder-api-endpoint
      members:
      - cinder_api_endpoint
      dimension:
        service: cinder-control
        nagios_host: 01-service-clusters
    cinder_api_check:
      policy: highest_severity
      alerting: enabled
      match:
        service: cinder-api-check
      members:
      - cinder_api_check
      dimension:
        service: cinder-control
        nagios_host: 01-service-clusters
    cinder_volume:
      policy: highest_severity
      alerting: enabled
      match:
        service: cinder-volume
      members:
      - cinder_volume
      dimension:
        service: cinder-data
        nagios_host: 01-service-clusters
    cinder_scheduler:
      policy: highest_severity
      alerting: enabled
      match:
        service: cinder-scheduler
      members:
      - cinder_scheduler
      dimension:
        service: cinder-control
        nagios_host: 01-service-clusters
    cinder_control:
      policy: highest_severity
      alerting: enabled_with_notification
      match:
        service: cinder-control
      members:
      - cinder_logs
      - cinder_api_endpoint
      - cinder_api_check
      - cinder_scheduler
      dimension:
        cluster_name: cinder-control
        nagios_host: 00-top-clusters
    cinder_data:
      policy: highest_severity
      alerting: enabled_with_notification
      match:
        service: cinder-data
      members:
      - cinder_logs_volume
      - cinder_volume
      dimension:
        cluster_name: cinder-data
        nagios_host: 00-top-clusters
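
Taken together, the heka metadata above defines a chain from raw metrics to Nagios: triggers evaluate metrics over a time window, alarms attach one or more triggers to a service dimension, and the aggregator rolls alarms up into per-service clusters reported under the 01-service-clusters Nagios host and two top-level clusters (cinder-control, cinder-data) reported under 00-top-clusters. As an informal trace (not generated output), a failing API endpoint check on a controller would propagate roughly as follows:

openstack_check_api{service: cinder-api} == 0 over a 60 s window
  -> trigger  cinder_api_check_failed   (severity: down)
  -> alarm    cinder_api_check          (dimension service: cinder-api-check)
  -> cluster  cinder_api_check          (policy: highest_severity, nagios_host: 01-service-clusters)
  -> cluster  cinder_control            (alerting: enabled_with_notification, nagios_host: 00-top-clusters)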


@@ -2,7 +2,7 @@ parameters:
   cinder:
     _support:
       collectd:
-        enabled: false
+        enabled: true
       heka:
         enabled: true
       sensu:
@@ -10,4 +10,6 @@ parameters:
       sphinx:
         enabled: true
       config:
-        enabled: true
+        enabled: true
+      grafana:
+        enabled: true