Fix ShellCheck warnings
This change fixes all problems reported by ShellCheck [1], a linting tool for sh/bash scripts. [1] http://www.shellcheck.net/about.html Change-Id: I7104597e309fed5a4d52f5fc39ab0f4350fb3217
This commit is contained in:
parent
e6c0b44eb0
commit
d7c011d613
|
@ -16,20 +16,21 @@
|
|||
set -e
|
||||
|
||||
function docker_pull_image {
|
||||
if [[ "$( docker images -q ${1})" == "" ]]; then
|
||||
docker pull ${1}
|
||||
if [[ $(docker images -q "${1}") == "" ]]; then
|
||||
docker pull "${1}"
|
||||
fi
|
||||
}
|
||||
|
||||
function docker_get_id {
|
||||
echo $(docker inspect --format="{{ .Id }}" ${1} 2>/dev/null)
|
||||
docker inspect --format="{{ .Id }}" "${1}" 2>/dev/null
|
||||
}
|
||||
|
||||
function docker_is_running {
|
||||
local IS_RUNNING=$(docker inspect --format="{{ .State.Running }}" ${1} 2>/dev/null)
|
||||
local IS_RUNNING
|
||||
IS_RUNNING=$(docker inspect --format="{{ .State.Running }}" "${1}" 2>/dev/null)
|
||||
[[ "${IS_RUNNING}" == "true" ]]
|
||||
}
|
||||
|
||||
function docker_shorten_id {
|
||||
echo ${1} | cut -c 1-12
|
||||
echo "${1}" | cut -c 1-12
|
||||
}
|
||||
|
|
|
@ -22,8 +22,8 @@
|
|||
|
||||
set -e
|
||||
|
||||
CURRENT_DIR=$(dirname $(readlink -f $0))
|
||||
[[ -f ${CURRENT_DIR}/../common/functions.sh ]] && . ${CURRENT_DIR}/../common/functions.sh
|
||||
CURRENT_DIR=$(dirname "$(readlink -f "$0")")
|
||||
[[ -f ${CURRENT_DIR}/../common/functions.sh ]] && . "${CURRENT_DIR}/../common/functions.sh"
|
||||
|
||||
DOCKER_NAME=elasticsearch
|
||||
DOCKER_IMAGE="dockerfile/elasticsearch"
|
||||
|
@ -46,7 +46,7 @@ if [[ "$(docker_get_id $DOCKER_NAME)" != "" ]]; then
|
|||
fi
|
||||
|
||||
if [[ ! -d $ES_DATA ]]; then
|
||||
mkdir $ES_DATA
|
||||
mkdir "$ES_DATA"
|
||||
elif [[ -f ${ES_DATA}/elasticsearch.yml ]]; then
|
||||
echo "Warning: ${ES_DATA}/elasticsearch.yaml already exists."
|
||||
fi
|
||||
|
@ -64,17 +64,17 @@ EOF
|
|||
|
||||
docker_pull_image ${DOCKER_IMAGE}
|
||||
|
||||
DOCKER_ID=$(timeout $RUN_TIMEOUT docker run -d -e ES_HEAP_SIZE=${ES_MEMORY}g -p ${ES_LISTEN_ADDRESS}:${ES_HTTP_PORT}:9200 -p ${ES_LISTEN_ADDRESS}:${ES_TRANSPORT_PORT}:9300 --name ${DOCKER_NAME} -v $ES_DATA:/data ${DOCKER_IMAGE} /elasticsearch/bin/elasticsearch -Des.config=/data/elasticsearch.yml)
|
||||
SHORT_ID=$(docker_shorten_id $DOCKER_ID)
|
||||
DOCKER_ID=$(timeout $RUN_TIMEOUT docker run -d -e ES_HEAP_SIZE="${ES_MEMORY}g" -p "${ES_LISTEN_ADDRESS}:${ES_HTTP_PORT}:9200" -p "${ES_LISTEN_ADDRESS}:${ES_TRANSPORT_PORT}:9300" --name "${DOCKER_NAME}" -v "$ES_DATA:/data" ${DOCKER_IMAGE} /elasticsearch/bin/elasticsearch -Des.config=/data/elasticsearch.yml)
|
||||
SHORT_ID=$(docker_shorten_id "$DOCKER_ID")
|
||||
|
||||
echo -n "Waiting for Elasticsearch to start"
|
||||
while ! curl http://${ES_LISTEN_ADDRESS}:${ES_HTTP_PORT} 1>/dev/null 2>&1; do
|
||||
while ! curl "http://${ES_LISTEN_ADDRESS}:${ES_HTTP_PORT}" 1>/dev/null 2>&1; do
|
||||
echo -n '.'
|
||||
IS_RUNNING=$(docker inspect --format="{{ .State.Running }}" ${DOCKER_ID})
|
||||
IS_RUNNING=$(docker inspect --format="{{ .State.Running }}" "${DOCKER_ID}")
|
||||
if [[ "${IS_RUNNING}" == "false" ]]; then
|
||||
echo ''
|
||||
echo "Container '${DOCKER_NAME}/${SHORT_ID}' failed to start!"
|
||||
docker logs $DOCKER_ID
|
||||
docker logs "$DOCKER_ID"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
|
@ -84,8 +84,8 @@ echo
|
|||
echo "Container '${DOCKER_NAME}/${SHORT_ID}' started successfully"
|
||||
|
||||
# Configure template for 'log-*' and 'notification-*' indices
|
||||
curl -s -XDELETE ${ES_URL}/_template/log 1>/dev/null
|
||||
curl -s -XPUT -d @log_index_template.json ${ES_URL}/_template/log 1>/dev/null
|
||||
curl -s -XPUT -d @notification_index_template.json ${ES_URL}/_template/notification 1>/dev/null
|
||||
curl -s -XDELETE "${ES_URL}/_template/log" 1>/dev/null
|
||||
curl -s -XPUT -d @log_index_template.json "${ES_URL}/_template/log" 1>/dev/null
|
||||
curl -s -XPUT -d @notification_index_template.json "${ES_URL}/_template/notification" 1>/dev/null
|
||||
|
||||
echo "Elasticsearch API avaiable at ${ES_URL}"
|
||||
|
|
|
@ -24,8 +24,8 @@
|
|||
|
||||
set -e
|
||||
|
||||
CURRENT_DIR=$(dirname $(readlink -f $0))
|
||||
[[ -f ${CURRENT_DIR}/../common/functions.sh ]] && . ${CURRENT_DIR}/../common/functions.sh
|
||||
CURRENT_DIR=$(dirname "$(readlink -f "$0")")
|
||||
[[ -f ${CURRENT_DIR}/../common/functions.sh ]] && . "${CURRENT_DIR}/../common/functions.sh"
|
||||
|
||||
DOCKER_NAME=influxdb
|
||||
DOCKER_IMAGE="tutum/influxdb"
|
||||
|
@ -45,16 +45,16 @@ fi
|
|||
|
||||
docker_pull_image $DOCKER_IMAGE
|
||||
|
||||
DOCKER_ID=$(timeout $RUN_TIMEOUT docker run -d -p ${INFLUXDB_LISTEN_ADDRESS}:8083:8083 -p ${INFLUXDB_LISTEN_ADDRESS}:8086:8086 --expose 8090 --expose 8099 --name ${DOCKER_NAME} ${DOCKER_IMAGE})
|
||||
SHORT_ID=$(docker_shorten_id $DOCKER_ID)
|
||||
DOCKER_ID=$(timeout $RUN_TIMEOUT docker run -d -p "${INFLUXDB_LISTEN_ADDRESS}:8083:8083" -p "${INFLUXDB_LISTEN_ADDRESS}:8086:8086" --expose 8090 --expose 8099 --name ${DOCKER_NAME} ${DOCKER_IMAGE})
|
||||
SHORT_ID=$(docker_shorten_id "$DOCKER_ID")
|
||||
|
||||
echo -n "Waiting for InfluxDB to be up"
|
||||
while ! curl ${INFLUXDB_URL} 1>/dev/null 2>&1; do
|
||||
while ! curl "${INFLUXDB_URL}" 1>/dev/null 2>&1; do
|
||||
echo -n '.'
|
||||
if ! docker_is_running $DOCKER_ID; then
|
||||
if ! docker_is_running "$DOCKER_ID"; then
|
||||
echo ''
|
||||
echo "Container '${DOCKER_NAME}/${SHORT_ID}' failed to start!"
|
||||
docker logs $DOCKER_ID
|
||||
docker logs "$DOCKER_ID"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
|
@ -62,14 +62,14 @@ done
|
|||
echo
|
||||
echo "Container '${DOCKER_NAME}/${SHORT_ID}' started successfully"
|
||||
|
||||
curl -X POST "${INFLUXDB_URL}/cluster_admins/root?u=root&p=root" -d '{"password": "'${INFLUXDB_ROOT_PASSWORD}'"}'
|
||||
curl -X POST "${INFLUXDB_URL}/cluster_admins/root?u=root&p=root" -d '{"password": "'"${INFLUXDB_ROOT_PASSWORD}"'"}'
|
||||
|
||||
curl -X POST "${INFLUXDB_URL}/db?u=root&p=${INFLUXDB_ROOT_PASSWORD}" -d '{"name": "'${INFLUXDB_LMA_DB}'"}'
|
||||
curl -X POST "${INFLUXDB_URL}/db/${INFLUXDB_LMA_DB}/users?u=root&p=${INFLUXDB_ROOT_PASSWORD}" -d '{"name": "'${INFLUXDB_LMA_USER}'", "password": "'${INFLUXDB_LMA_PASSWORD}'"}'
|
||||
curl -X POST "${INFLUXDB_URL}/db?u=root&p=${INFLUXDB_ROOT_PASSWORD}" -d '{"name": "'"${INFLUXDB_LMA_DB}"'"}'
|
||||
curl -X POST "${INFLUXDB_URL}/db/${INFLUXDB_LMA_DB}/users?u=root&p=${INFLUXDB_ROOT_PASSWORD}" -d '{"name": "'"${INFLUXDB_LMA_USER}"'", "password": "'"${INFLUXDB_LMA_PASSWORD}"'"}'
|
||||
echo "InfluxDB provisioned with db=${INFLUXDB_LMA_DB}, user=${INFLUXDB_LMA_USER}, pass=${INFLUXDB_LMA_PASSWORD}"
|
||||
|
||||
curl -X POST "${INFLUXDB_URL}/db?u=root&p=${INFLUXDB_ROOT_PASSWORD}" -d '{"name": "'${INFLUXDB_GRAFANA_DB}'"}'
|
||||
curl -X POST "${INFLUXDB_URL}/db/${INFLUXDB_GRAFANA_DB}/users?u=root&p=${INFLUXDB_ROOT_PASSWORD}" -d '{"name": "'${INFLUXDB_LMA_USER}'", "password": "'${INFLUXDB_LMA_PASSWORD}'"}'
|
||||
curl -X POST "${INFLUXDB_URL}/db/${INFLUXDB_GRAFANA_DB}/users?u=root&p=${INFLUXDB_ROOT_PASSWORD}" -d '{"name": "'"${INFLUXDB_LMA_USER}"'", "password": "'"${INFLUXDB_LMA_PASSWORD}"'"}'
|
||||
echo "InfluxDB provisioned with db=${INFLUXDB_GRAFANA_DB}, user=${INFLUXDB_LMA_USER}, pass=${INFLUXDB_LMA_PASSWORD}"
|
||||
|
||||
echo "InfluxDB API available at ${INFLUXDB_URL}"
|
||||
|
|
|
@ -28,12 +28,12 @@
|
|||
set -e
|
||||
|
||||
function fail {
|
||||
echo $1
|
||||
echo "$1"
|
||||
exit 1
|
||||
}
|
||||
|
||||
function info {
|
||||
echo $1
|
||||
echo "$1"
|
||||
}
|
||||
|
||||
# Run pre-installation checks
|
||||
|
@ -70,7 +70,7 @@ else:
|
|||
sys.exit(1)
|
||||
EOF
|
||||
|
||||
CURRENT_DIR=$(dirname $(readlink -f $0))
|
||||
CURRENT_DIR=$(dirname "$(readlink -f "$0")")
|
||||
|
||||
UI_PORT=8081
|
||||
PRIMARY_IP_ADDRESS=$(hostname --ip-address)
|
||||
|
@ -89,15 +89,15 @@ if [[ $FREE_MEM -lt $MIN_MEM ]]; then
|
|||
fi
|
||||
|
||||
# Don't eat up all the free memory unless it is absolutely necessary
|
||||
if [[ $FREE_MEM -gt $(( 2 * $MIN_MEM )) ]]; then
|
||||
FREE_MEM=$(( $FREE_MEM - $MIN_MEM ))
|
||||
if [[ $FREE_MEM -gt $(( 2 * MIN_MEM )) ]]; then
|
||||
FREE_MEM=$(( FREE_MEM - MIN_MEM ))
|
||||
fi
|
||||
|
||||
# There is no point of allocating more than 32GB of RAM to the JVM
|
||||
if [[ $FREE_MEM -gt $MAX_MEM ]]; then
|
||||
FREE_MEM=$MAX_MEM
|
||||
fi
|
||||
ES_MEMORY=$(( $FREE_MEM / 1024 ))
|
||||
ES_MEMORY=$(( FREE_MEM / 1024 ))
|
||||
|
||||
info "Starting the installation of the LMA collector plugin..."
|
||||
|
||||
|
@ -110,34 +110,34 @@ yum install -y createrepo rpm dpkg-devel
|
|||
|
||||
info "Building the Fuel plugin..."
|
||||
rm -f ../lma_collector*fp
|
||||
if ! (cd ${CURRENT_DIR}/.. && fuel-plugin-builder --build ./); then
|
||||
if ! (cd "${CURRENT_DIR}/.." && fuel-plugin-builder --build ./); then
|
||||
fail "Failed to build the Fuel plugin."
|
||||
fi
|
||||
|
||||
info "Installing the Fuel plugin..."
|
||||
if ! (cd ${CURRENT_DIR}/.. && fuel plugins --force --install lma_collector*.fp); then
|
||||
if ! (cd "${CURRENT_DIR}/.." && fuel plugins --force --install lma_collector*.fp); then
|
||||
fail "Failed to install the Fuel plugin."
|
||||
fi
|
||||
|
||||
info "Building the documentation"
|
||||
pip install Sphinx
|
||||
if ! (cd ${CURRENT_DIR}/../doc && make html); then
|
||||
if ! (cd "${CURRENT_DIR}/../doc" && make html); then
|
||||
info "Couldn't build the documentation."
|
||||
fi
|
||||
|
||||
info "Starting the Elasticsearch container..."
|
||||
mkdir -p $ES_DIR
|
||||
if ! (cd ${CURRENT_DIR}/elasticsearch && ES_MEMORY=$ES_MEMORY ES_DATA=$ES_DIR ES_LISTEN_ADDRESS=$PRIMARY_IP_ADDRESS ./run_container.sh); then
|
||||
if ! (cd "${CURRENT_DIR}/elasticsearch" && ES_MEMORY=$ES_MEMORY ES_DATA=$ES_DIR ES_LISTEN_ADDRESS=$PRIMARY_IP_ADDRESS ./run_container.sh); then
|
||||
fail "Failed to start the Elasticsearch container."
|
||||
fi
|
||||
|
||||
info "Starting the InfluxDB container..."
|
||||
if ! (cd ${CURRENT_DIR}/influxdb && LISTEN_ADDRESS=$PRIMARY_IP_ADDRESS ./run_container.sh); then
|
||||
if ! (cd "${CURRENT_DIR}/influxdb" && LISTEN_ADDRESS=$PRIMARY_IP_ADDRESS ./run_container.sh); then
|
||||
fail "Failed to start the InfluxDB container."
|
||||
fi
|
||||
|
||||
info "Starting the LMA UI container..."
|
||||
if ! (cd ${CURRENT_DIR}/ui && docker build -t lma_ui . && docker run -d -p ${UI_PORT}:80 --name lma_ui lma_ui); then
|
||||
if ! (cd "${CURRENT_DIR}/ui" && docker build -t lma_ui . && docker run -d -p ${UI_PORT}:80 --name lma_ui lma_ui); then
|
||||
fail "Failed to start the LMA UI container."
|
||||
fi
|
||||
info "Kibana dashboard available at http://${PRIMARY_IP_ADDRESS}:${UI_PORT}/kibana/"
|
||||
|
|
|
@ -23,8 +23,8 @@ function convert_yes_no {
|
|||
fi
|
||||
}
|
||||
|
||||
KIBANA_ENABLED=$(convert_yes_no ${KIBANA_ENABLED:-yes})
|
||||
GRAFANA_ENABLED=$(convert_yes_no ${GRAFANA_ENABLED:-yes})
|
||||
KIBANA_ENABLED=$(convert_yes_no "${KIBANA_ENABLED:-yes}")
|
||||
GRAFANA_ENABLED=$(convert_yes_no "${GRAFANA_ENABLED:-yes}")
|
||||
|
||||
if [[ ${KIBANA_ENABLED} == "yes" ]]; then
|
||||
ES_HOST=${ES_HOST:-\"+window.location.hostname+\"}
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#!/bin/bash
|
||||
set -eux
|
||||
|
||||
ROOT="$(dirname `readlink -f $0`)"
|
||||
ROOT="$(dirname "$(readlink -f "$0")")"
|
||||
MODULES_DIR="${ROOT}"/deployment_scripts/puppet/modules
|
||||
RPM_REPO="${ROOT}"/repositories/centos/
|
||||
DEB_REPO="${ROOT}"/repositories/ubuntu/
|
||||
|
@ -13,22 +13,22 @@ APACHE_TARBALL_URL="https://forgeapi.puppetlabs.com/v3/files/puppetlabs-apache-1
|
|||
|
||||
function download_packages {
|
||||
while [ $# -gt 0 ]; do
|
||||
FILENAME=$(basename $1)
|
||||
FILENAME=$(basename "$1")
|
||||
EXT=${FILENAME##*.}
|
||||
case ${EXT} in
|
||||
deb) REPO=$DEB_REPO;;
|
||||
rpm) REPO=$RPM_REPO;;
|
||||
esac
|
||||
|
||||
rm -f $REPO/$FILENAME
|
||||
wget -qO - $1 > $REPO/$FILENAME
|
||||
rm -f "$REPO"/"$FILENAME"
|
||||
wget -qO - "$1" > "$REPO"/"$FILENAME"
|
||||
shift
|
||||
done
|
||||
}
|
||||
|
||||
download_packages \
|
||||
https://github.com/mozilla-services/heka/releases/download/v${HEKA_VERSION}/heka_${HEKA_VERSION}_amd64.deb > ${DEB_REPO}/heka_${HEKA_VERSION}_amd64.deb \
|
||||
https://github.com/mozilla-services/heka/releases/download/v${HEKA_VERSION}/heka-${HEKA_VERSION//./_}-linux-amd64.rpm > ${RPM_REPO}/heka-${HEKA_VERSION//./_}-linux-amd64.rpm \
|
||||
https://github.com/mozilla-services/heka/releases/download/v${HEKA_VERSION}/heka_${HEKA_VERSION}_amd64.deb > "${DEB_REPO}"/heka_${HEKA_VERSION}_amd64.deb \
|
||||
https://github.com/mozilla-services/heka/releases/download/v${HEKA_VERSION}/heka-${HEKA_VERSION//./_}-linux-amd64.rpm > "${RPM_REPO}"/heka-${HEKA_VERSION//./_}-linux-amd64.rpm \
|
||||
http://mirror.centos.org/centos/6/os/x86_64/Packages/libdbi-0.8.3-4.el6.x86_64.rpm \
|
||||
http://mirror.centos.org/centos/6/os/x86_64/Packages/libdbi-drivers-0.8.3-5.1.el6.x86_64.rpm \
|
||||
http://mirror.centos.org/centos/6/os/x86_64/Packages/libdbi-dbd-mysql-0.8.3-5.1.el6.x86_64.rpm \
|
||||
|
@ -39,17 +39,17 @@ download_packages \
|
|||
|
||||
|
||||
# Extract dependent manifests from fuel-library
|
||||
rm -rf ${MODULES_DIR}/{cinder,glance,heat,inifile,keystone,neutron,nova,openstack,stdlib,concat}
|
||||
rm -rf "${MODULES_DIR:?}"/{cinder,glance,heat,inifile,keystone,neutron,nova,openstack,stdlib,concat}
|
||||
wget -qO- "${FUEL_LIB_TARBALL_URL}" | \
|
||||
tar -C "${MODULES_DIR}" --strip-components=3 -zxvf - \
|
||||
fuel-library-${FUEL_LIB_VERSION}/deployment/puppet/{cinder,glance,heat,inifile,keystone,neutron,nova,openstack,stdlib,concat}
|
||||
|
||||
rm -rf ${MODULES_DIR}/collectd
|
||||
mkdir -p ${MODULES_DIR}/collectd
|
||||
rm -rf "${MODULES_DIR:?}"/collectd
|
||||
mkdir -p "${MODULES_DIR}"/collectd
|
||||
wget -qO- "${COLLECTD_TARBALL_URL}" | tar -C "${MODULES_DIR}/collectd" --strip-components=1 -xz
|
||||
|
||||
# Apache is not available in Fuel 6.0. It will be available in 6.1. So until the switch to 6.1
|
||||
# we download it from puppetlabs.
|
||||
rm -rf ${MODULES_DIR}/apache
|
||||
mkdir -p ${MODULES_DIR}/apache
|
||||
rm -rf "${MODULES_DIR:?}"/apache
|
||||
mkdir -p "${MODULES_DIR}"/apache
|
||||
wget -qO- "${APACHE_TARBALL_URL}" | tar -C "${MODULES_DIR}/apache" --strip-components=1 -xz
|
||||
|
|
Loading…
Reference in New Issue