Add deployment of persisters
This change adds the Puppet code to deploy the persisters that read samples from RabbitMQ and push them to InfluxDB.

Change-Id: Iff1bb26b795f9562162b5b02b482571e288f8ca1
This commit is contained in:
parent
bdd800422e
commit
e81d7ee353
|
@ -1,3 +1,9 @@
|
|||
notice('MODULAR: fuel-plugin-telemetry: influxdb-create-db.pp')

# Create the InfluxDB database only when an external ("remote") InfluxDB
# endpoint is configured; the local StackLight deployment provisions its
# own database elsewhere.
$influxdb_mode = hiera('telemetry::influxdb::mode')

if $influxdb_mode == 'remote' {
  class { 'telemetry::create_influxdb_database': }
}
|
|
@ -9,40 +9,29 @@ include ::ceilometer::params
|
|||
# Still needed $aodh_nodes ?
|
||||
$aodh_nodes = hiera('aodh_nodes')
|
||||
|
||||
# TODO_3 es_node should be configured because of a bug in Ceilometer API
|
||||
if hiera('lma::collector::elasticsearch::server', false) {
|
||||
$elasticsearch_node = hiera('lma::collector::elasticsearch::server')
|
||||
$elasticsearch_port = hiera('lma::collector::elasticsearch::rest_port')
|
||||
} else {
|
||||
$elasticsearch_node = ''
|
||||
$elasticsearch_port = ''
|
||||
$elasticsearch_node = '0.0.0.0'
|
||||
$elasticsearch_port = '9200'
|
||||
}
|
||||
|
||||
|
||||
if hiera('lma::collector::influxdb::server', false) {
|
||||
$influxdb_vip = hiera('lma::collector::influxdb::server')
|
||||
$influxdb_port = hiera('lma::collector::influxdb::port')
|
||||
$influx_database = hiera('lma::collector::influxdb::database')
|
||||
# TODO move to hiera
|
||||
$influx_user = 'root'
|
||||
$influx_password = hiera('lma::collector::influxdb::password')
|
||||
$influx_root_password = hiera('lma::collector::influxdb::root_password')
|
||||
} else {
|
||||
$influxdb_vip = ''
|
||||
$influxdb_port = ''
|
||||
$influx_database = ''
|
||||
# TODO move to hiera
|
||||
$influx_user = ''
|
||||
$influx_password = ''
|
||||
$influx_root_password = ''
|
||||
}
|
||||
$ceilometer_service_name = $::ceilometer::params::api_service_name
|
||||
# TODO move to hiera
|
||||
$event_pipeline_file = '/etc/ceilometer/event_pipeline.yaml'
|
||||
# TODO move to hiera
|
||||
$ceilometer_publishers = 'direct'
|
||||
|
||||
# calculated values
|
||||
$metering_connection = "stacklight://${influx_user}:${influx_password}@${influxdb_vip}:${influxdb_port}/ceilometer"
|
||||
$influxdb_address = hiera('telemetry::influxdb::address')
|
||||
$influxdb_port = hiera('telemetry::influxdb::port')
|
||||
$influxdb_database = hiera('telemetry::influxdb::database')
|
||||
$influx_user = hiera('telemetry::influxdb::user')
|
||||
$influx_password = hiera('telemetry::influxdb::password')
|
||||
|
||||
$metering_connection = "stacklight://${influx_user}:${influx_password}@${influxdb_address}:${influxdb_port}/${influxdb_database}"
|
||||
|
||||
$resource_connection = "es://${elasticsearch_node}:${elasticsearch_port}"
|
||||
$event_connection = "es://${elasticsearch_node}:${elasticsearch_port}"
|
||||
$connection = $metering_connection
|
||||
|
@ -90,8 +79,10 @@ service { 'ceilometer-collector':
|
|||
|
||||
ceilometer_config { 'database/metering_connection': value => $metering_connection }
|
||||
ceilometer_config { 'database/resource_connection': value => $resource_connection }
|
||||
ceilometer_config { 'database/event_connection': value => $event_connection }
|
||||
ceilometer_config { 'database/connection': value => $connection }
|
||||
ceilometer_config { 'database/event_connection': value => $event_connection }
|
||||
ceilometer_config { 'database/connection': value => $connection }
|
||||
ceilometer_config { 'notification/store_events': value => false }
|
||||
|
||||
|
||||
service { 'ceilometer-service':
|
||||
ensure => $service_ensure,
|
||||
|
|
|
@ -0,0 +1,48 @@
|
|||
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

notice('fuel-plugin-lma-collector: configure_apt.pp')

# APT snippet that disables the automatic installation of suggested and
# recommended packages.
$str = 'APT::Install-Suggests "0";
APT::Install-Recommends "0";
'

case $::osfamily {
  'Debian': {
    file { '/etc/apt/apt.conf.d/99norecommends':
      ensure  => file,
      content => $str,
    }
  }
  default: {
    # Currently only Debian like distributions need specific configuration.
  }
}

# TODO enable roles detection in hiera
#$node_profiles = hiera_hash('lma::collector::node_profiles')
#if $node_profiles['controller'] or $node_profiles['rabbitmq'] or $node_profiles['mysql'] {
if true {
  # The OCF script should exist before any node tries to configure the
  # collector services with Pacemaker. This is why it is shipped by this
  # manifest.
  file { 'ocf-lma_collector':
    ensure => present,
    source => 'puppet:///modules/telemetry/ocf-lma_collector',
    path   => '/usr/lib/ocf/resource.d/fuel/ocf-lma_collector',
    mode   => '0755',
    owner  => 'root',
    group  => 'root',
  }
}
|
|
@ -0,0 +1,151 @@
|
|||
|
||||
## Get values
|
||||
|
||||
$config_dir = hiera('telemetry::heka::config_dir')
|
||||
$amqp_url = hiera('telemetry::rabbit::url')
|
||||
|
||||
if hiera('telemetry::elasticsearch::server',false) {
|
||||
$ip = hiera('telemetry::elasticsearch::server')
|
||||
$port = hiera('telemetry::elasticsearch::server')
|
||||
$elasticsearch_url = "http://${ip}:${port}"
|
||||
} else {
|
||||
#no Elasticsearch
|
||||
#heka failed to start if url schemma is not valid, so we set http here
|
||||
$elasticsearch_url = 'http://'
|
||||
}
|
||||
|
||||
$influxdb_address = hiera('telemetry::influxdb::address')
|
||||
$influxdb_port = hiera('telemetry::influxdb::port')
|
||||
$influxdb_database = hiera('telemetry::influxdb::database')
|
||||
$influxdb_user = hiera('telemetry::influxdb::user')
|
||||
$influxdb_password = hiera('telemetry::influxdb::password')
|
||||
|
||||
### Heka configuration
|
||||
|
||||
file {
|
||||
"${config_dir}/amqp-openstack_sample.toml": content => template( 'telemetry/heka/amqp-openstack_sample.toml.erb' );
|
||||
"${config_dir}/decoder-sample.toml": content => template( 'telemetry/heka/decoder-sample.toml.erb' );
|
||||
"${config_dir}/encoder-influxdb.toml": content => template( 'telemetry/heka/encoder-influxdb.toml.erb' );
|
||||
"${config_dir}/encoder-resource-elasticsearch.toml": content => template( 'telemetry/heka/encoder-resource-elasticsearch.toml.erb' );
|
||||
"${config_dir}/file-output-resource.toml": content => template( 'telemetry/heka/file-output-resource.toml.erb' );
|
||||
"${config_dir}/file-output.toml": content => template( 'telemetry/heka/file-output.toml.erb' );
|
||||
"${config_dir}/filter-influxdb_accumulator_sample.toml": content => template( 'telemetry/heka/filter-influxdb_accumulator_sample.toml.erb' );
|
||||
# TODO disable config when Elasticsearch not in use
|
||||
"${config_dir}/output-elasticsearch-resource2.toml": content => template( 'telemetry/heka/output-elasticsearch-resource2.toml.erb' );
|
||||
"${config_dir}/output-influxdb-samples.toml": content => template( 'telemetry/heka/output-influxdb-samples.toml.erb' );
|
||||
}
|
||||
|
||||
### Heka lua scripts
|
||||
|
||||
$heka_dir = '/usr/share/heka'
|
||||
$modules_dir = '/usr/share/heka/lua_modules'
|
||||
|
||||
file {
|
||||
$heka_dir: ensure => 'directory';
|
||||
$modules_dir: ensure => 'directory';
|
||||
}
|
||||
|
||||
file {
|
||||
"${modules_dir}/decoders": ensure => 'directory';
|
||||
"${modules_dir}/encoders": ensure => 'directory';
|
||||
"${modules_dir}/filters": ensure => 'directory';
|
||||
}
|
||||
|
||||
file {
|
||||
"${modules_dir}/decoders/metering.lua":
|
||||
source => 'puppet:///modules/telemetry/decoders/metering.lua'
|
||||
;
|
||||
"${modules_dir}/encoders/es_bulk.lua":
|
||||
source => 'puppet:///modules/telemetry/encoders/es_bulk.lua'
|
||||
;
|
||||
"${modules_dir}/filters/influxdb_ceilometer_accumulator.lua":
|
||||
source => 'puppet:///modules/telemetry/filters/influxdb_ceilometer_accumulator.lua'
|
||||
;
|
||||
}
|
||||
|
||||
### Heka extra modules
|
||||
|
||||
file {
|
||||
"${modules_dir}/extra_fields.lua":
|
||||
source => 'puppet:///modules/telemetry/extra_fields.lua'
|
||||
;
|
||||
"${modules_dir}/lma_utils.lua":
|
||||
source => 'puppet:///modules/telemetry/lma_utils.lua'
|
||||
;
|
||||
"${modules_dir}/patterns.lua":
|
||||
source => 'puppet:///modules/telemetry/patterns.lua'
|
||||
;
|
||||
}
|
||||
|
||||
# Heka Installation
|
||||
|
||||
$version = hiera('telemetry::heka::version')
|
||||
$max_message_size = hiera('telemetry::heka::max_message_size')
|
||||
$max_process_inject = hiera('telemetry::heka::max_process_inject')
|
||||
$max_timer_inject = hiera('telemetry::heka::max_timer_inject')
|
||||
$poolsize = hiera('telemetry::heka::poolsize')
|
||||
|
||||
# TODO we dont't need them on controller
|
||||
$install_init_script = true
|
||||
|
||||
::heka { 'persister_collector':
|
||||
config_dir => '/etc/persister_collector',
|
||||
user => $user,
|
||||
#additional_groups => $additional_groups,
|
||||
hostname => $::hostname,
|
||||
max_message_size => $max_message_size,
|
||||
max_process_inject => $max_process_inject,
|
||||
max_timer_inject => $max_timer_inject,
|
||||
poolsize => $poolsize,
|
||||
install_init_script => $install_init_script,
|
||||
version => $version,
|
||||
}
|
||||
|
||||
#TODO enable pacemaker ?
|
||||
# pacemaker::service { 'persister_collector':
|
||||
# ensure => present,
|
||||
# prefix => false,
|
||||
# primitive_class => 'ocf',
|
||||
# primitive_type => 'ocf-lma_collector',
|
||||
# use_handler => false,
|
||||
# complex_type => 'clone',
|
||||
# complex_metadata => {
|
||||
# # the resource should start as soon as the dependent resources
|
||||
# # (eg RabbitMQ) are running *locally*
|
||||
# 'interleave' => true,
|
||||
# },
|
||||
# metadata => {
|
||||
# # Make sure that Pacemaker tries to restart the resource if it fails
|
||||
# # too many times
|
||||
# 'failure-timeout' => '120s',
|
||||
# 'migration-threshold' => '3',
|
||||
# },
|
||||
# parameters => {
|
||||
# 'service_name' => 'persister_collector',
|
||||
# 'config' => '/etc/persister_collector',
|
||||
# 'log_file' => '/var/log/persister_collector.log',
|
||||
# 'user' => $user,
|
||||
# },
|
||||
# operations => {
|
||||
# 'monitor' => {
|
||||
# 'interval' => '20',
|
||||
# 'timeout' => '10',
|
||||
# },
|
||||
# 'start' => {
|
||||
# 'timeout' => '30',
|
||||
# },
|
||||
# 'stop' => {
|
||||
# 'timeout' => '30',
|
||||
# },
|
||||
# },
|
||||
# # require => Lma_collector::Heka['log_collector'],
|
||||
# }
|
||||
|
||||
service { 'persister_collector':
|
||||
ensure => 'running',
|
||||
#ensure => 'stopped',
|
||||
enable => true,
|
||||
#provider => 'pacemaker',
|
||||
provider => 'upstart',
|
||||
}
|
||||
|
|
@ -1,5 +1,13 @@
|
|||
notice('MODULAR: fuel-plugin-telemetry: hiera.pp')
|
||||
|
||||
$plugin_data = hiera_hash('telemetry', undef)
|
||||
prepare_network_config(hiera_hash('network_scheme', {}))
|
||||
$network_metadata = hiera_hash('network_metadata')
|
||||
$hiera_file = '/etc/hiera/plugins/telemetry.yaml'
|
||||
$telemetry = hiera('telemetry')
|
||||
|
||||
# Ceilometer
|
||||
|
||||
$ceilometer_hash = hiera_hash('ceilometer', $default_ceilometer_hash)
|
||||
$ceilometer_alarm_history_time_to_live = $ceilometer_hash['alarm_history_time_to_live']
|
||||
$ceilometer_event_time_to_live = $ceilometer_hash['event_time_to_live']
|
||||
|
@ -13,7 +21,104 @@ $ceilometer_user_password = $ceilometer_hash['user_password']
|
|||
$elasticsearch_script_inline = 'on'
|
||||
$elasticsearch_script_indexed = 'on'
|
||||
|
||||
$hiera_file = '/etc/hiera/plugins/telemetry.yaml'
|
||||
# Elasticsearch
|
||||
|
||||
$is_elasticsearch_node = roles_include(['elasticsearch_kibana', 'primary-elasticsearch_kibana'])
|
||||
|
||||
if $plugin_data['elastic_search_ip'] {
|
||||
$elasticsearch_mode = 'remote'
|
||||
} else {
|
||||
$elasticsearch_mode = 'local'
|
||||
}
|
||||
|
||||
#$elasticsearch_mode = $plugin_data['elasticsearch_mode']
|
||||
$es_nodes = get_nodes_hash_by_roles($network_metadata, ['elasticsearch_kibana', 'primary-elasticsearch_kibana'])
|
||||
$es_nodes_count = count($es_nodes)
|
||||
|
||||
case $elasticsearch_mode {
|
||||
'remote': {
|
||||
$es_server = $plugin_data['elastic_search_ip']
|
||||
}
|
||||
'local': {
|
||||
$es_vip_name = 'es_vip_mgmt'
|
||||
if $network_metadata['vips'][$es_vip_name] {
|
||||
$es_server = $network_metadata['vips'][$es_vip_name]['ipaddr']
|
||||
} else {
|
||||
$es_server = undef
|
||||
}
|
||||
}
|
||||
default: {
|
||||
fail("'${elasticsearch_mode}' mode not supported for Elasticsearch")
|
||||
}
|
||||
}
|
||||
if $es_nodes_count > 0 or $es_server {
|
||||
$es_is_deployed = true
|
||||
} else {
|
||||
$es_is_deployed = false
|
||||
}
|
||||
|
||||
# InfluxDB
|
||||
|
||||
if $telemetry['influxdb_address'] {
|
||||
|
||||
notice('Use external InfluxDB')
|
||||
|
||||
$influxdb_mode = 'remote'
|
||||
|
||||
$influxdb_address = $telemetry['influxdb_address']
|
||||
$influxdb_port = $telemetry['influxdb_port']
|
||||
$influxdb_database = $telemetry['influxdb_database']
|
||||
$influxdb_user = $telemetry['influxdb_user']
|
||||
$influxdb_password = $telemetry['influxdb_password']
|
||||
|
||||
# TODO hardcode or move to params?
|
||||
$retention_period = '30'
|
||||
|
||||
} else {
|
||||
|
||||
notice('Use StackLight integrated InfluxDB')
|
||||
|
||||
$influxdb_mode = 'local'
|
||||
|
||||
if !hiera('influxdb_grafana',false) {
|
||||
fail(join([
|
||||
'The StackLight InfluxDB-Grafana Plugin not found, ',
|
||||
'please configure external InfluxDB in advanced settings or install the plugin'
|
||||
]))
|
||||
}
|
||||
|
||||
$influxdb_grafana = hiera('influxdb_grafana')
|
||||
$influxdb_nodes = get_nodes_hash_by_roles($network_metadata, ['influxdb_grafana', 'primary-influxdb_grafana'])
|
||||
$nodes_array = values($influxdb_nodes)
|
||||
|
||||
if count($nodes_array)==0 {
|
||||
fail(join([
|
||||
'No nodes with InfluxDB Grafana role, please add one or more nodes',
|
||||
'with this role to the environment or configure external InfluxDB in advanced settings'
|
||||
]))
|
||||
}
|
||||
|
||||
# TODO test for multiple inxlixdb nodes !!!
|
||||
$influxdb_address = $nodes_array[0]['network_roles']['management']
|
||||
|
||||
$retention_period = $influxdb_grafana['retention_period']
|
||||
$influxdb_user = $influxdb_grafana['influxdb_username']
|
||||
$influxdb_password = $influxdb_grafana['influxdb_userpass']
|
||||
$influxdb_port = '8086'
|
||||
$influxdb_database = 'ceilometer'
|
||||
$influxdb_rootpass = $influxdb_grafana['influxdb_rootpass']
|
||||
|
||||
}
|
||||
|
||||
# Rabbit
|
||||
|
||||
$rabbit_info = hiera('rabbit')
|
||||
$rabbit_password = $rabbit_info['password']
|
||||
$rabbit_user = $rabbit_info['user']
|
||||
# TODO take one?
|
||||
$amqp_host = hiera('amqp_hosts')
|
||||
$amqp_url = "amqp://${rabbit_user}:${rabbit_password}@${amqp_host}/"
|
||||
|
||||
|
||||
$calculated_content = inline_template('
|
||||
---
|
||||
|
@ -31,8 +136,31 @@ ceilometer:
|
|||
# Required for StackLight LMA ElasticSearch params
|
||||
lma::elasticsearch::script_inline: "<%= @elasticsearch_script_inline %>"
|
||||
lma::elasticsearch::script_indexed: "<%= @elasticsearch_script_indexed %>"
|
||||
')
|
||||
|
||||
<% if @es_is_deployed -%>
|
||||
telemetry::elasticsearch::server: <%= @es_server %>
|
||||
telemetry::elasticsearch::rest_port: 9200
|
||||
<% end -%>
|
||||
|
||||
telemetry::influxdb::mode: <%= @influxdb_mode %>
|
||||
telemetry::influxdb::address: <%= @influxdb_address %>
|
||||
telemetry::influxdb::port: <%= @influxdb_port %>
|
||||
telemetry::influxdb::database: <%= @influxdb_database %>
|
||||
telemetry::influxdb::user: <%= @influxdb_user %>
|
||||
telemetry::influxdb::password: <%= @influxdb_password %>
|
||||
telemetry::influxdb::retention_period: <%= @retention_period %>
|
||||
telemetry::influxdb::rootpass: <%= @influxdb_rootpass %>
|
||||
|
||||
telemetry::heka::version: "0.10.0"
|
||||
telemetry::heka::max_message_size: 262144
|
||||
telemetry::heka::max_process_inject: 1
|
||||
telemetry::heka::max_timer_inject: 10
|
||||
telemetry::heka::poolsize: 100
|
||||
telemetry::heka::config_dir: "/etc/persister_collector"
|
||||
|
||||
telemetry::rabbit::url: "<%= @amqp_url %>"
|
||||
|
||||
')
|
||||
|
||||
file { $hiera_file:
|
||||
ensure => file,
|
||||
|
|
|
@ -0,0 +1,337 @@
|
|||
#!/bin/bash
|
||||
# Copyright 2016 Mirantis, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# See usage() function below for more details ...
|
||||
#
|
||||
# OCF instance parameters:
|
||||
# OCF_RESKEY_binary
|
||||
# OCF_RESKEY_config
|
||||
# OCF_RESKEY_log_file
|
||||
# OCF_RESKEY_user
|
||||
# OCF_RESKEY_watchdog_file
|
||||
# OCF_RESKEY_watchdog_timeout
|
||||
#######################################################################
|
||||
# Initialization:
|
||||
|
||||
# Source the OCF shell function library (provides ocf_log, ocf_run,
# check_binary and the OCF_* return-code constants).
: "${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}"
. "${OCF_FUNCTIONS_DIR}/ocf-shellfuncs"

#######################################################################

# Defaults applied when the corresponding OCF_RESKEY_* parameter is not
# supplied by the cluster resource definition.
OCF_RESKEY_binary_default="/usr/bin/hekad"
OCF_RESKEY_user_default="root"
OCF_RESKEY_watchdog_file_default=
OCF_RESKEY_watchdog_timeout_default=20

: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}}
: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}}
: ${OCF_RESKEY_watchdog_file=${OCF_RESKEY_watchdog_file_default}}
: ${OCF_RESKEY_watchdog_timeout=${OCF_RESKEY_watchdog_timeout_default}}
|
||||
|
||||
#######################################################################
|
||||
|
||||
# Print a short usage message describing the actions this resource agent
# supports. Output goes to stdout; the exit status is that of cat.
usage() {
    cat <<UEND
usage: $0 (start|stop|validate-all|meta-data|status|monitor)

$0 manages the collector process as an HA resource

The 'start' operation starts the collector
The 'stop' operation stops the collector
The 'validate-all' operation reports whether the parameters are valid
The 'meta-data' operation reports this RA's meta-data information
The 'status' operation reports whether the collector is running
The 'monitor' operation reports whether the collector is running

UEND
}
|
||||
|
||||
# Emit the OCF resource-agent meta-data XML on stdout. Pacemaker calls
# this with the 'meta-data' action to discover parameters and actions.
meta_data() {
    cat <<END
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="lma_collector">
<version>1.0</version>

<longdesc lang="en">
Manages the LMA collector daemon as a Pacemaker Resource.
</longdesc>
<shortdesc lang="en">Manages Log or Metric collector</shortdesc>
<parameters>

<parameter name="service_name" unique="0" required="1">
<longdesc lang="en">
Name of the collector service.
</longdesc>
<shortdesc lang="en">Collector service name</shortdesc>
<content type="string" />
</parameter>

<parameter name="binary" unique="0" required="0">
<longdesc lang="en">
Path of the LMA collector binary file that will be run.
</longdesc>
<shortdesc lang="en">LMA collector binary file</shortdesc>
<content type="string" default="${OCF_RESKEY_binary_default}" />
</parameter>

<parameter name="config" unique="0" required="1">
<longdesc lang="en">
Path to the LMA collector configuration file or directory
</longdesc>
<shortdesc lang="en">LMA collector configuration</shortdesc>
<content type="string" />
</parameter>

<parameter name="log_file" unique="0" required="1">
<longdesc lang="en">
Path to the LMA collector log file
</longdesc>
<shortdesc lang="en">LMA collector log file</shortdesc>
<content type="string" />
</parameter>

<parameter name="user" unique="0" required="0">
<longdesc lang="en">
User running the LMA collector process
</longdesc>
<shortdesc lang="en">LMA collector user</shortdesc>
<content type="string" default="${OCF_RESKEY_user_default}" />
</parameter>

<parameter name="watchdog_file" unique="0" required="0">
<longdesc lang="en">
The file to monitor that the process is up and running
</longdesc>
<shortdesc lang="en">LMA collector watchdog file</shortdesc>
<content type="string" default="${OCF_RESKEY_watchdog_file_default}" />
</parameter>

<parameter name="watchdog_timeout" unique="0" required="0">
<longdesc lang="en">
How much time the watchdog file can be left unmodified before claiming that
the process is unresponsive.
</longdesc>
<shortdesc lang="en">LMA collector watchdog timeout</shortdesc>
<content type="string" default="${OCF_RESKEY_watchdog_timeout_default}" />
</parameter>

</parameters>

<actions>
<action name="start" timeout="20" />
<action name="stop" timeout="20" />
<action name="status" timeout="20" />
<action name="monitor" timeout="30" interval="20" />
<action name="validate-all" timeout="5" />
<action name="meta-data" timeout="5" />
</actions>
</resource-agent>
END
}
|
||||
|
||||
#######################################################################
|
||||
# Functions invoked by resource manager actions
|
||||
|
||||
# Validate the resource parameters before any other action runs:
# the binary must exist, the config path must be a file or a directory,
# and the configured user must be known to the system.
# Side effect: derives PID_FILE, used by all subsequent actions.
service_validate() {
    PID_FILE="${HA_RSCTMP}/${__SCRIPT_NAME}/${OCF_RESKEY_service_name}.pid"

    # check_binary exits with OCF_ERR_INSTALLED if the binary is missing.
    check_binary "$OCF_RESKEY_binary"

    if [[ ! -f $OCF_RESKEY_config && ! -d $OCF_RESKEY_config ]]; then
        ocf_log err "Config $OCF_RESKEY_config doesn't exist"
        return "$OCF_ERR_GENERIC"
    fi

    if ! getent passwd "$OCF_RESKEY_user" >/dev/null 2>&1; then
        ocf_log err "User $OCF_RESKEY_user doesn't exist"
        return "$OCF_ERR_GENERIC"
    fi

    true
}
|
||||
|
||||
# Report whether the collector process is alive.
# Returns OCF_SUCCESS when the PID from PID_FILE answers signal 0 and the
# optional watchdog file is fresh enough; OCF_NOT_RUNNING when the process
# is absent or the watchdog is stale; OCF_ERR_GENERIC on an empty PID file.
service_status() {
    local rc
    local pid

    # Make sure the directory holding the PID file exists and is owned by
    # the service user, so that service_start can write into it.
    local PID_DIR
    PID_DIR=$( dirname "${PID_FILE}" )
    if [ ! -d "${PID_DIR}" ] ; then
        ocf_log debug "Create pid file dir: ${PID_DIR} and chown to ${OCF_RESKEY_user}"
        mkdir -p "${PID_DIR}"
        chown -R "${OCF_RESKEY_user}" "${PID_DIR}"
        chmod 755 "${PID_DIR}"
    fi

    # No PID file at all means the service was never started (or cleanly
    # stopped).
    if [ ! -f "$PID_FILE" ]; then
        ocf_log info "LMA collector is not running"
        return "$OCF_NOT_RUNNING"
    fi
    pid=$(cat "$PID_FILE")

    if [ -z "${pid}" ]; then
        ocf_log err "PID file ${PID_FILE} is empty!"
        return "$OCF_ERR_GENERIC"
    fi

    # Signal 0 probes for process existence without affecting it.
    ocf_run -warn kill -s 0 "$pid"
    rc=$?
    if [ $rc -ne 0 ]; then
        ocf_log info "Old PID file found, but LMA collector process isn't running"
        return "$OCF_NOT_RUNNING"
    fi

    # Optional liveness check: the process is expected to touch the
    # watchdog file periodically; a stale mtime means it is wedged.
    if [ ! -z "$OCF_RESKEY_watchdog_file" ]; then
        if [ ! -f "$OCF_RESKEY_watchdog_file" ]; then
            ocf_log info "${OCF_RESKEY_watchdog_file} is missing"
            return "$OCF_NOT_RUNNING"
        fi
        local now
        local last_access
        now=$(date '+%s')
        last_access=$(stat -c %Y "${OCF_RESKEY_watchdog_file}")
        if [ $(( now - last_access )) -gt "${OCF_RESKEY_watchdog_timeout}" ]; then
            ocf_log err "File ${OCF_RESKEY_watchdog_file} not modified since ${OCF_RESKEY_watchdog_timeout} seconds"
            return "$OCF_NOT_RUNNING"
        fi
    else
        ocf_log debug "Skip watchdog check since watchdog_file parameter is not set"
    fi

    return "$OCF_SUCCESS"
}
|
||||
|
||||
# 'monitor' is an alias for 'status': the return code of service_status
# is propagated unchanged to the caller.
service_monitor() {
    service_status
}
|
||||
|
||||
# Start the collector as OCF_RESKEY_user and block until service_monitor
# reports it running. Exits with OCF_ERR_GENERIC if the monitor returns
# anything other than "success" or "not running" while waiting.
service_start() {
    local rc

    # Idempotent start: nothing to do when already running.
    service_monitor
    rc=$?
    if [ $rc -eq "$OCF_SUCCESS" ]; then
        ocf_log info "${OCF_RESKEY_service_name} is already running"
        return "$OCF_SUCCESS"
    fi

    # See https://bugs.launchpad.net/lma-toolchain/+bug/1543289
    ulimit -n 102400

    # The inner quoting matters: the redirection runs inside su's shell,
    # while the backgrounding and PID capture happen in THIS shell so the
    # PID file is written by us, not by the child.
    su "${OCF_RESKEY_user}" -s /bin/sh -c "${OCF_RESKEY_binary} \
        -config=${OCF_RESKEY_config} >> $OCF_RESKEY_log_file 2>&1"' & echo $!' > "$PID_FILE"

    # Spin waiting for the server to come up.
    while :; do
        service_monitor
        rc=$?
        if [ $rc -eq "$OCF_SUCCESS" ]; then
            break
        elif [ $rc -ne "$OCF_NOT_RUNNING" ]; then
            ocf_log err "${OCF_RESKEY_service_name} start failed"
            exit "$OCF_ERR_GENERIC"
        fi
        sleep 3
    done

    ocf_log info "${OCF_RESKEY_service_name} started"
    return "$OCF_SUCCESS"
}
|
||||
|
||||
# Stop the collector: SIGTERM first, then wait up to the cluster-derived
# shutdown timeout, and escalate to SIGKILL if the process survives.
# Always removes the PID file before returning OCF_SUCCESS.
service_stop() {
    local rc
    local pid

    # Idempotent stop: nothing to do when already down.
    service_monitor
    rc=$?
    if [ $rc -eq "$OCF_NOT_RUNNING" ]; then
        ocf_log info "${OCF_RESKEY_service_name} is already stopped"
        return "$OCF_SUCCESS"
    fi

    # Try SIGTERM
    pid=$(cat "$PID_FILE")
    ocf_run kill -s TERM "$pid"
    rc=$?
    if [ $rc -ne 0 ]; then
        ocf_log err "${OCF_RESKEY_service_name} couldn't be stopped"
        exit "$OCF_ERR_GENERIC"
    fi

    # Derive the grace period from Pacemaker's operation timeout (ms),
    # keeping 5 seconds of margin; fall back to 15s outside a cluster.
    shutdown_timeout=15
    if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
        shutdown_timeout=$(( (OCF_RESKEY_CRM_meta_timeout/1000)-5 ))
    fi

    count=0
    while [ $count -lt $shutdown_timeout ]; do
        service_monitor
        rc=$?
        if [ $rc -eq "$OCF_NOT_RUNNING" ]; then
            break
        fi
        count=$(( count + 1))
        sleep 1
        ocf_log debug "${OCF_RESKEY_service_name} still hasn't stopped yet. Waiting ..."
    done

    service_monitor
    rc=$?
    if [ "${rc}" -ne "${OCF_NOT_RUNNING}" ]; then
        # SIGTERM didn't help either, try SIGKILL
        ocf_log info "${OCF_RESKEY_service_name} failed to stop after ${shutdown_timeout}s using SIGTERM. Trying SIGKILL ..."
        ocf_run kill -s KILL "${pid}"
    fi

    ocf_log info "${OCF_RESKEY_service_name} stopped"

    ocf_log debug "Delete pid file: ${PID_FILE} with content $(cat "${PID_FILE}")"
    rm -f "${PID_FILE}"

    return "${OCF_SUCCESS}"
}
|
||||
|
||||
#######################################################################
|
||||
|
||||
# Informational actions short-circuit before parameter validation, as
# required by the OCF specification.
case "$1" in
    meta-data)
        meta_data
        exit "$OCF_SUCCESS";;
    usage|help)
        usage
        exit "$OCF_SUCCESS";;
esac

# Anything except meta-data and help must pass validation
service_validate || exit $?

# What kind of method was invoked?
case "$1" in
    start)        service_start;;
    stop)         service_stop;;
    status)       service_status;;
    monitor)      service_monitor;;
    validate-all) ;;
    *)
        usage
        exit "$OCF_ERR_UNIMPLEMENTED";;
esac
|
|
@ -14,63 +14,27 @@
|
|||
|
||||
class telemetry::create_influxdb_database () {
|
||||
|
||||
notice('fuel-plugin-influxdb-grafana: influxdb_configuration.pp')
|
||||
notice('fuel-plugin-influxdb-grafana: influxdb_configuration.pp')
|
||||
|
||||
$telemetry = hiera('telemetry')
|
||||
notice($telemetry)
|
||||
|
||||
if $telemetry['influxdb_ip'] {
|
||||
|
||||
notice('Use External InfluxDB')
|
||||
|
||||
$influxdb_server = $telemetry['influxdb_ip']
|
||||
$local_port = $telemetry['influxdb_port']
|
||||
$admin_user = $telemetry['influxdb_admin_user']
|
||||
$admin_password = $telemetry['influxdb_admin_pass']
|
||||
$username = $telemetry['influxdb_username']
|
||||
$password = $telemetry['influxdb_userpass']
|
||||
$retention_period = 30
|
||||
|
||||
} else {
|
||||
|
||||
notice('Use StackLight integrated InfluxDB')
|
||||
|
||||
if !hiera('influxdb_grafana',false) {
|
||||
fail('influxdb_grafana not found, looks like plugin is not installed')
|
||||
$influxdb_address = hiera('telemetry::influxdb::address')
|
||||
$influxdb_port = hiera('telemetry::influxdb::port')
|
||||
$influxdb_database = hiera('telemetry::influxdb::database')
|
||||
$influxdb_user = hiera('telemetry::influxdb::user')
|
||||
$influxdb_password = hiera('telemetry::influxdb::password')
|
||||
$retention_period = hiera('telemetry::influxdb::retention_period')
|
||||
$admin_user = 'root'
|
||||
$admin_password = hiera('telemetry::influxdb::rootpass')
|
||||
$influxdb_url = "http://${influxdb_address}:${influxdb_port}"
|
||||
$replication_factor = 3
|
||||
|
||||
telemetry::influxdb_database { $influxdb_database:
|
||||
admin_user => $admin_user,
|
||||
admin_password => $admin_password,
|
||||
influxdb_url => $influxdb_url,
|
||||
db_user => $influxdb_user,
|
||||
db_password => $influxdb_password,
|
||||
retention_period => $retention_period,
|
||||
replication_factor => $replication_factor,
|
||||
}
|
||||
|
||||
$influxdb_grafana = hiera('influxdb_grafana')
|
||||
|
||||
# influx ip
|
||||
prepare_network_config(hiera_hash('network_scheme', {}))
|
||||
$network_metadata = hiera_hash('network_metadata')
|
||||
$influxdb_nodes = get_nodes_hash_by_roles($network_metadata, ['influxdb_grafana', 'primary-influxdb_grafana'])
|
||||
$nodes_array = values($influxdb_nodes)
|
||||
# test for multiple inxlixdb nodes !!!
|
||||
$influxdb_server = $nodes_array[0]['network_roles']['management']
|
||||
#$influxdb_server = $influxdb_nodes[0]['internal_address']
|
||||
|
||||
$local_port = 8086
|
||||
$admin_user = 'root'
|
||||
$admin_password = $influxdb_grafana['influxdb_rootpass']
|
||||
$username = $influxdb_grafana['influxdb_username']
|
||||
$password = $influxdb_grafana['influxdb_userpass']
|
||||
$retention_period = $influxdb_grafana['retention_period']
|
||||
|
||||
}
|
||||
|
||||
$influxdb_url = "http://${influxdb_server}:${local_port}"
|
||||
$replication_factor = 3
|
||||
$database_name = 'ceilometer'
|
||||
|
||||
telemetry::influxdb_database { $database_name:
|
||||
admin_user => $admin_user,
|
||||
admin_password => $admin_password,
|
||||
influxdb_url => $influxdb_url,
|
||||
db_user => $username,
|
||||
db_password => $password,
|
||||
retention_period => $retention_period,
|
||||
replication_factor => $replication_factor,
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -1,10 +1,45 @@
|
|||
# Copyright 2016 Mirantis, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# == Class: telemetry
|
||||
#
|
||||
# Install and configure the core of the Heka service.
|
||||
#
|
||||
# === Parameters
|
||||
#
|
||||
# [*event_pipeline_file*]
|
||||
# TODO
|
||||
#
|
||||
# [*publishers*]
|
||||
# TODO
|
||||
#
|
||||
#
|
||||
# === Examples (TODO)
|
||||
#
|
||||
# class { 'telemetry':
|
||||
# event_pipeline_file => $event_pipeline_file,
|
||||
# publishers => $ceilometer_publishers,
|
||||
# }
|
||||
#
|
||||
|
||||
class telemetry (
|
||||
$event_pipeline_file,
|
||||
$publishers,
|
||||
) {
|
||||
|
||||
file { "${event_pipeline_file}":
|
||||
ensure => 'present',
|
||||
file { $event_pipeline_file:
|
||||
ensure => 'present',
|
||||
content => template('telemetry/event_pipeline.yaml.erb')
|
||||
}
|
||||
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
[samples_influxdb_output]
|
||||
type = "HttpOutput"
|
||||
[samples_influxdb_output]
|
||||
type = "HttpOutput"
|
||||
message_matcher = "Fields[payload_type] == 'txt' && Fields[payload_name] == 'sample_data'"
|
||||
encoder = "influxdb_encoder"
|
||||
address = "http://<%= @influxdb_server %>:8086/write?db=ceilometer&precision=ns"
|
||||
username = "root"
|
||||
password = "r00tme"
|
||||
address = "http://<%= @influxdb_address %>:<%= @influxdb_port %>/write?db=<%= @influxdb_database %>&precision=ns"
|
||||
username = "<%= @influxdb_user %>"
|
||||
password = "<%= @influxdb_password %>"
|
||||
http_timeout = 5000
|
||||
method = "POST"
|
||||
use_buffering = true
|
||||
|
|
|
@ -8,6 +8,9 @@
|
|||
version: 2.0.0
|
||||
requires: [globals]
|
||||
required_for: [logging]
|
||||
# TODO: we don't have access to the influx hiera data; do we need a dependency on it?
|
||||
cross-depends:
|
||||
- name: influxdb-hiera
|
||||
role: ['/.*/']
|
||||
parameters:
|
||||
puppet_manifest: "puppet/manifests/hiera.pp"
|
||||
|
@ -170,6 +173,23 @@
|
|||
# timeout: 300
|
||||
# cwd: /
|
||||
|
||||
|
||||
- id: telemetry-configure-apt
|
||||
type: puppet
|
||||
version: 2.0.0
|
||||
# We use upload_nodes_info as an anchor to order the post-deployment tasks executed
|
||||
# by this plugin and the InfluxDB & Elasticsearch plugins. The dependency chain is:
|
||||
# Other plugins tasks -> upload_nodes_info -> (LMA collector tasks)
|
||||
requires: [post_deployment_start]
|
||||
required_for: [telemetry-integration-configuration, post_deployment_end]
|
||||
role: [primary-controller, controller]
|
||||
parameters:
|
||||
puppet_manifest: puppet/manifests/configure_apt.pp
|
||||
puppet_modules: puppet/modules:/etc/puppet/modules
|
||||
timeout: 600
|
||||
reexecute_on:
|
||||
- deploy_changes
|
||||
|
||||
# Integration tasks
|
||||
####################
|
||||
|
||||
|
@ -177,8 +197,10 @@
|
|||
type: puppet
|
||||
version: 2.1.0
|
||||
groups: [primary-controller, controller]
|
||||
required_for: [deploy_end]
|
||||
requires: [deploy_start, telemetry-ceilometer-controller]
|
||||
requires: [post_deployment_start,telemetry-ceilometer-controller]
|
||||
required_for: [post_deployment_end]
|
||||
# required_for: [deploy_end]
|
||||
# requires: [deploy_start, telemetry-ceilometer-controller]
|
||||
cross-depends:
|
||||
- name: primary-influxdb_grafana
|
||||
# - name: lma-hiera-override
|
||||
|
@ -207,6 +229,17 @@
|
|||
puppet_modules: puppet/modules:/etc/puppet/modules
|
||||
timeout: 300
|
||||
|
||||
- id: telemetry-heka
|
||||
type: puppet
|
||||
version: 2.1.0
|
||||
groups: [primary-controller, controller]
|
||||
required_for: [post_deployment_end]
|
||||
requires: [telemetry-influxdb-create-db]
|
||||
parameters:
|
||||
puppet_manifest: puppet/manifests/heka.pp
|
||||
puppet_modules: puppet/modules:/etc/puppet/modules
|
||||
timeout: 300
|
||||
|
||||
# skip base tasks
|
||||
- id: ceilometer-radosgw-user
|
||||
type: skipped
|
||||
|
|
|
@ -14,11 +14,11 @@ attributes:
|
|||
value: ''
|
||||
label: 'External Elasticsearch'
|
||||
description: "In case you want to use external Elasticsearch please specify an IP or DNS name here"
|
||||
weight: 11
|
||||
weight: 20
|
||||
type: "text"
|
||||
regex:
|
||||
source: '^[a-zA-Z\d][a-zA-Z\d_\-.]+$'
|
||||
error: "Invalid address or name."
|
||||
# regex:
|
||||
# source: '^[a-zA-Z\d][a-zA-Z\d_\-.]+$'
|
||||
# error: "Invalid address or name."
|
||||
restrictions:
|
||||
- condition: "settings:telemetry.advanced_settings.value == false"
|
||||
action: hide
|
||||
|
@ -27,7 +27,7 @@ attributes:
|
|||
value: '9200'
|
||||
label: 'External Elasticsearch port'
|
||||
description: "In case you want to use external Elasticsearch please specify the port number. Default is 9200"
|
||||
weight: 12
|
||||
weight: 30
|
||||
type: "text"
|
||||
regex:
|
||||
source: '^\d{0,5}$'
|
||||
|
@ -36,11 +36,11 @@ attributes:
|
|||
- condition: "settings:telemetry.advanced_settings.value == false"
|
||||
action: hide
|
||||
|
||||
influxdb_ip:
|
||||
influxdb_address:
|
||||
value: ''
|
||||
label: 'External InfluxDB'
|
||||
description: "In case you want to use external InfluxDB please specify an IP or DNS name here"
|
||||
weight: 13
|
||||
weight: 40
|
||||
type: "text"
|
||||
regex:
|
||||
source: '^[a-zA-Z\d][a-zA-Z\d_\-.]+$'
|
||||
|
@ -53,7 +53,7 @@ attributes:
|
|||
value: '8086'
|
||||
label: 'External InfluxDB port'
|
||||
description: "In case you want to use external InfluxDB please specify the port number. DEFAULT is 8086"
|
||||
weight: 14
|
||||
weight: 50
|
||||
type: "text"
|
||||
regex:
|
||||
source: '^\d{0,5}$'
|
||||
|
@ -62,28 +62,15 @@ attributes:
|
|||
- condition: "settings:telemetry.advanced_settings.value == false"
|
||||
action: hide
|
||||
|
||||
influxdb_admin_user:
|
||||
value: ''
|
||||
label: 'External InfluxDB admin user'
|
||||
description: "The admin username to access external InfluxDB"
|
||||
weight: 15
|
||||
influxdb_database:
|
||||
value: 'ceilometer'
|
||||
label: 'InfluxDB database name'
|
||||
description: ''
|
||||
weight: 60
|
||||
type: "text"
|
||||
regex:
|
||||
regex: &not_empty_parameter
|
||||
source: '\S'
|
||||
error: "You must provide a username."
|
||||
restrictions:
|
||||
- condition: "settings:telemetry.advanced_settings.value == false"
|
||||
action: hide
|
||||
|
||||
influxdb_admin_pass:
|
||||
value: ''
|
||||
label: 'External InfluxDB admin password'
|
||||
description: "The admin password to access external InfluxDB."
|
||||
weight: 16
|
||||
type: "password"
|
||||
regex:
|
||||
source: '^[\S]{4,}$'
|
||||
error: "You must provide a password with at least 4 characters"
|
||||
error: "Invalid value"
|
||||
restrictions:
|
||||
- condition: "settings:telemetry.advanced_settings.value == false"
|
||||
action: hide
|
||||
|
@ -92,7 +79,7 @@ attributes:
|
|||
value: ''
|
||||
label: 'External InfluxDB user'
|
||||
description: "The username to access external InfluxDB"
|
||||
weight: 17
|
||||
weight: 90
|
||||
type: "text"
|
||||
regex:
|
||||
source: '\S'
|
||||
|
@ -105,7 +92,7 @@ attributes:
|
|||
value: ''
|
||||
label: 'External InfluxDB password'
|
||||
description: "The password to access external InfluxDB."
|
||||
weight: 18
|
||||
weight: 100
|
||||
type: "password"
|
||||
regex:
|
||||
source: '^[\S]{4,}$'
|
||||
|
|
|
@ -0,0 +1,63 @@
|
|||
#!/bin/bash
|
||||
# Copyright 2016 Mirantis, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -eux
|
||||
|
||||
ROOT="$(dirname "$(readlink -f "$0")")"
|
||||
MODULES_DIR="${ROOT}"/deployment_scripts/puppet/modules
|
||||
RPM_REPO="${ROOT}"/repositories/centos/
|
||||
DEB_REPO="${ROOT}"/repositories/ubuntu/
|
||||
|
||||
# Map a package URL to its storage path inside the local repository.
# Prints the destination path for *.deb / *.rpm URLs; for anything else,
# reports the invalid URL and aborts the build.
function get_package_path {
    local pkg_file
    pkg_file=$(basename "$1")
    case "$1" in
        *.deb)
            echo "$DEB_REPO"/"$pkg_file"
            ;;
        *.rpm)
            echo "$RPM_REPO"/"$pkg_file"
            ;;
        *)
            echo "Invalid URL for $1"
            exit 1
            ;;
    esac
}
|
||||
|
||||
# Download RPM or DEB packages and store them in the local repository directory.
# Each argument is a package URL; the destination is derived via get_package_path.
function download_packages {
    local pkg_url
    for pkg_url in "$@"; do
        wget -qO - "$pkg_url" > "$(get_package_path "$pkg_url")"
    done
}
|
||||
|
||||
# Download a single file and store it in a local directory.
#   $1 - source URL
#   $2 - name to store the file under
#   $3 - destination directory (created if missing)
# Fix: quote all expansions so paths containing spaces or glob
# characters do not word-split under `set -eux`.
function download_file {
    local url=$1
    local file_name=$2
    local destination=$3
    mkdir -p "$destination"
    wget -qO "$destination/$file_name" "$url"
}
|
||||
|
||||
|
||||
# Download an official Puppet module and store it in the local modules directory.
#   $1 - module name (subdirectory under MODULES_DIR)
#   $2 - URL of the module tarball
# Any previous copy of the module is removed first; ${MODULES_DIR:?} aborts
# if MODULES_DIR is unset, guarding the rm -rf.
function download_puppet_module {
    local module_dir="${MODULES_DIR:?}/$1"
    rm -rf "$module_dir"
    mkdir -p "$module_dir"
    wget -qO- "$2" | tar -C "$module_dir" --strip-components=1 -xz
}
|
||||
|
||||
# Verify the md5 checksum of a locally stored package.
#   $1 - package URL/name (resolved to a local path via get_package_path)
#   $2 - expected md5 digest
# md5sum --check --strict fails the build on any mismatch or malformed line.
function check_md5sum {
    local stored_file
    stored_file="$(get_package_path "$1")"
    echo "$2 $stored_file" | md5sum --check --strict
}
|
|
@ -1,5 +1,24 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Add here any the actions which are required before plugin build
|
||||
# like packages building, packages downloading from mirrors and so on.
|
||||
# The script should return 0 if there were no errors.
|
||||
set -eux
|
||||
|
||||
. "$(dirname "$(readlink -f "$0")")"/functions.sh
|
||||
|
||||
HEKA_VERSION="0.10.0"
|
||||
COLLECTOR_TAG="0.10.0"
|
||||
|
||||
# Download Heka deb package
|
||||
|
||||
download_packages \
|
||||
https://github.com/elemoine/heka/releases/download/ratelimit-1/heka_${HEKA_VERSION}_amd64.deb
|
||||
check_md5sum heka_${HEKA_VERSION}_amd64.deb 69514d94173181a8d1dcab769062fdac
|
||||
|
||||
# Download Heka puppet module from lma collector plugin
|
||||
|
||||
URL="https://github.com/openstack/fuel-plugin-lma-collector/archive/${COLLECTOR_TAG}.tar.gz"
|
||||
HEKA_MODULE_PATH="fuel-plugin-lma-collector-${COLLECTOR_TAG}/deployment_scripts/puppet/modules/heka"
|
||||
DESTINATION=deployment_scripts/puppet/modules
|
||||
TEMP_DIR=`mktemp -u`
|
||||
download_file $URL $COLLECTOR_TAG.tar.gz $TEMP_DIR
|
||||
tar -xf $TEMP_DIR/$COLLECTOR_TAG.tar.gz -C $DESTINATION --strip-components=4 $HEKA_MODULE_PATH
|
||||
rm -fr $TEMP_DIR
|
Loading…
Reference in New Issue