charm-cinder/hooks/cinder-hooks

#!/bin/bash -e
CHARM_DIR=$(dirname $0)

if [[ -e $CHARM_DIR/cinder-common ]] ; then
  . $CHARM_DIR/cinder-common
else
  juju-log "ERROR: Could not source cinder-common from $CHARM_DIR."
  exit 1
fi
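
# install hook: set up the apt source, install the required packages and,
# if the volume service is enabled, prepare local block storage. Helpers
# such as configure_install_source, determine_packages and prepare_storage
# come from the sourced cinder-common.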
install_hook() {
  install_source="$(config-get openstack-origin)"
  # If we are deploying to Precise from distro, use the Cloud Archive
  # instead of the Ubuntu archive, since Cinder does not exist in the
  # Precise archive.
  . /etc/lsb-release
  [[ "$DISTRIB_CODENAME" == "precise" && "$install_source" == "distro" ]] &&
    install_source="cloud:precise-folsom"
  configure_install_source "$install_source"
  apt-get update || true  # ignore transient archive errors
  pkgs=$(determine_packages)
  juju-log "cinder: Installing following packages: $pkgs"
  DEBIAN_FRONTEND=noninteractive apt-get -y install $pkgs
  if service_enabled "volume" ; then
    # Prepare local storage if the volume service is being installed.
    block_dev=$(config-get block-device)
    if [[ "$block_dev" != "None" && "$block_dev" != "none" ]] ; then
      vol_group=$(config-get volume-group)
      overwrite=$(config-get overwrite)
      prepare_storage "$block_dev" "$vol_group" "$overwrite"
      set_or_update "volume_group" "$vol_group"
      cinder_ctl cinder-volume restart
    fi
  fi
  configure_https
}
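
# shared-db relation joined: advertise the desired database name, username
# and this unit's address so the database charm can create access for us.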
db_joined() {
  juju-log "cinder: Requesting database access to cinder database."
  relation-set database=$(config-get cinder-db) username=$(config-get db-user)
  relation-set hostname=$(unit-get private-address)
}
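
# shared-db relation changed: once the database service has provided a host
# and password, write sql_connection to cinder.conf and run the schema
# migration (leader only).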
db_changed() {
  db_host=$(relation-get db_host)
  db_password=$(relation-get password)
  [[ -z "$db_host" ]] || [[ -z "$db_password" ]] &&
    juju-log "Missing db_host or db_password, peer not ready? Will retry." &&
    exit 0
  db_user=$(config-get db-user)
  cinder_db=$(config-get cinder-db)
  juju-log "cinder: Configuring cinder for database access to $cinder_db@$db_host"
  set_or_update sql_connection "mysql://$db_user:$db_password@$db_host/$cinder_db"
  cinder_ctl all stop
  if eligible_leader 'res_cinder_vip'; then
    /usr/bin/cinder-manage db sync
  fi
  cinder_ctl all start
}
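
# amqp relation joined: request a rabbitmq username and vhost for cinder.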
amqp_joined() {
  # Fetch the vhost before logging it; previously it was logged unset.
  local rabbit_vhost=$(config-get rabbit-vhost)
  juju-log "cinder: Requesting amqp access to vhost $rabbit_vhost."
  relation-set username=$(config-get rabbit-user)
  relation-set vhost="$rabbit_vhost"
}
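
# amqp relation changed: write the rabbit credentials into cinder.conf,
# using the cluster VIP when rabbitmq reports itself as clustered.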
amqp_changed() {
  rabbit_host=$(relation-get private-address)
  rabbit_password=$(relation-get password)
  [[ -z "$rabbit_host" ]] || [[ -z "$rabbit_password" ]] &&
    juju-log "Missing rabbit_host or rabbit_password, peer not ready? Will retry." &&
    exit 0
  local clustered=$(relation-get clustered)
  if [[ -n "$clustered" ]] ; then
    juju-log "$CHARM - amqp_changed: Configuring for access to haclustered rabbitmq service."
    local vip=$(relation-get vip)
    [[ -z "$vip" ]] && juju-log "$CHARM - amqp_changed: Clustered but no vip." &&
      exit 0
    rabbit_host="$vip"
  fi
  # Fetch user and vhost before logging them; previously the vhost was
  # logged before it had been set.
  rabbit_user=$(config-get rabbit-user)
  rabbit_vhost=$(config-get rabbit-vhost)
  juju-log "cinder: Configuring cinder for amqp access to $rabbit_host:$rabbit_vhost"
  set_or_update rabbit_host "$rabbit_host"
  set_or_update rabbit_userid "$rabbit_user"
  set_or_update rabbit_password "$rabbit_password"
  set_or_update rabbit_virtual_host "$rabbit_vhost"
  cinder_ctl all restart
}
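
# identity-service relation joined: the cluster leader registers cinder's
# endpoint URLs with keystone, using the VIP when clustered.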
keystone_joined() {
  # Exit hook execution if this unit is not leader of the cluster/service.
  eligible_leader 'res_cinder_vip' || return 0
  # Determine the correct endpoint URL.
  https && scheme="https" || scheme="http"
  is_clustered && local host=$(config-get vip) ||
    local host=$(unit-get private-address)
  local url="$scheme://$host:$(config-get api-listening-port)/v1/\$(tenant_id)s"
  relation-set service="cinder" \
    region="$(config-get region)" public_url="$url" admin_url="$url" internal_url="$url"
}
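
# identity-service relation changed: record the keystone authtoken settings
# in the API paste config and switch cinder to the keystone auth strategy.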
keystone_changed() {
  service_port=$(relation-get service_port)
  auth_port=$(relation-get auth_port)
  service_username=$(relation-get service_username)
  service_password=$(relation-get service_password)
  service_tenant=$(relation-get service_tenant)
  [[ -z "$service_port" ]] || [[ -z "$auth_port" ]] ||
    [[ -z "$service_username" ]] || [[ -z "$service_password" ]] ||
    [[ -z "$service_tenant" ]] && juju-log "keystone_changed: Peer not ready" &&
    exit 0
  service_host=$(relation-get service_host)
  auth_host=$(relation-get auth_host)
  # Update keystone authtoken settings accordingly.
  set_or_update "service_host" "$service_host" "$API_CONF"
  set_or_update "service_port" "$service_port" "$API_CONF"
  set_or_update "auth_host" "$auth_host" "$API_CONF"
  set_or_update "auth_port" "$auth_port" "$API_CONF"
  set_or_update "admin_tenant_name" "$service_tenant" "$API_CONF"
  set_or_update "admin_user" "$service_username" "$API_CONF"
  set_or_update "admin_password" "$service_password" "$API_CONF"
  set_or_update "auth_protocol" "http" "$API_CONF"
  set_or_update "auth_strategy" "keystone" "$CINDER_CONF"
  cinder_ctl all restart
  configure_https
}
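
# ceph relation joined: install the ceph client tooling ahead of receiving
# a key from the monitors.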
ceph_joined() {
  mkdir -p /etc/ceph
  apt-get -y install ceph-common || exit 1
}
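
# ceph relation changed: write the keyring and a minimal ceph.conf, create
# the 'cinder' pool (leader only) and point cinder-volume at the RBD driver.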
ceph_changed() {
  SERVICE_NAME=$(echo $JUJU_UNIT_NAME | cut -d / -f 1)
  KEYRING=/etc/ceph/ceph.client.$SERVICE_NAME.keyring
  KEY=$(relation-get key)
  if [ -n "$KEY" ]; then
    # But only once.
    if [ ! -f $KEYRING ]; then
      ceph-authtool $KEYRING \
        --create-keyring --name=client.$SERVICE_NAME \
        --add-key="$KEY"
      chmod +r $KEYRING
    fi
  else
    # No key - bail for the time being.
    exit 0
  fi
  MONS=$(relation-list)
  mon_hosts=""
  for mon in $MONS; do
    mon_hosts="$mon_hosts$(relation-get private-address $mon):6789,"
  done
  cat > /etc/ceph/ceph.conf << EOF
[global]
auth supported = $(relation-get auth)
keyring = /etc/ceph/\$cluster.\$name.keyring
mon host = $mon_hosts
EOF
  # XXX: Horrid kludge to make cinder-volume use
  # a different ceph username than admin.
  echo "CEPH_ARGS=--id $SERVICE_NAME" >> /etc/environment
  # Also add it to the overrides for cinder-volume
  # in preparation for the move to start-stop-daemon.
  echo "env CEPH_ARGS=\"--id $SERVICE_NAME\"" > /etc/init/cinder-volume.override
  # Only the leader should try to create pools.
  if eligible_leader 'res_cinder_vip'; then
    # Create the cinder pool if it does not already exist.
    if ! rados --id $SERVICE_NAME lspools | grep -q cinder; then
      rados --id $SERVICE_NAME mkpool cinder
    fi
  fi
  # Reconfigure cinder-volume.
  set_or_update volume_driver cinder.volume.driver.RBDDriver
  set_or_update rbd_pool cinder
  # Set host to the service name to ensure that requests get
  # distributed across all cinder servers in a cluster,
  # as they can all service ceph requests.
  set_or_update host "$SERVICE_NAME"
  cinder_ctl "cinder-volume" restart
}
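
# cluster relation changed/departed: regenerate the haproxy configuration
# and move cinder-api onto its backend port behind haproxy.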
cluster_changed() {
  service_enabled "api" || return 0
  [[ -z "$(peer_units)" ]] &&
    juju-log "cluster_changed() with no peers." && exit 0
  local cfg_api_port="$(config-get api-listening-port)"
  local haproxy_port="$(determine_haproxy_port $cfg_api_port)"
  local backend_port="$(determine_api_port $cfg_api_port)"
  service cinder-api stop || :
  configure_haproxy "cinder_api:$haproxy_port:$backend_port"
  set_or_update osapi_volume_listen_port "$backend_port"
  service cinder-api start
}
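
# upgrade-charm: re-run cluster configuration so the haproxy settings are
# regenerated after an upgrade.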
upgrade_charm() {
  cluster_changed
}
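
# ha relation joined: hand the hacluster charm the resource definitions
# (VIP plus cloned haproxy) it needs to build the corosync/pacemaker
# cluster.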
ha_relation_joined() {
  local corosync_bindiface=$(config-get ha-bindiface)
  local corosync_mcastport=$(config-get ha-mcastport)
  local vip=$(config-get vip)
  local vip_iface=$(config-get vip_iface)
  local vip_cidr=$(config-get vip_cidr)
  if [ -n "$vip" ] && [ -n "$vip_iface" ] && \
     [ -n "$vip_cidr" ] && [ -n "$corosync_bindiface" ] && \
     [ -n "$corosync_mcastport" ]; then
    # TODO: This feels horrible, but the data required by the hacluster
    # charm is quite complex and is parsed as a python ast.
    resources="{
      'res_cinder_vip':'ocf:heartbeat:IPaddr2',
      'res_cinder_haproxy':'lsb:haproxy'
    }"
    resource_params="{
      'res_cinder_vip': 'params ip=\"$vip\" cidr_netmask=\"$vip_cidr\" nic=\"$vip_iface\"',
      'res_cinder_haproxy': 'op monitor interval=\"5s\"'
    }"
    init_services="{
      'res_cinder_haproxy':'haproxy'
    }"
    clones="{
      'cl_cinder_haproxy': 'res_cinder_haproxy'
    }"
    relation-set corosync_bindiface=$corosync_bindiface \
      corosync_mcastport=$corosync_mcastport \
      resources="$resources" resource_params="$resource_params" \
      init_services="$init_services" clones="$clones"
  else
    juju-log "Insufficient configuration data to configure hacluster"
    exit 1
  fi
}
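
# ha relation changed: once hacluster reports the cluster as formed, the
# leader re-registers the keystone endpoints against the VIP.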
ha_relation_changed() {
  local clustered=$(relation-get clustered)
  if [ -n "$clustered" ] && is_leader 'res_cinder_vip'; then
    juju-log "Cluster leader, reconfiguring keystone endpoint"
    https && local scheme="https" || local scheme="http"
    local url="$scheme://$(config-get vip):$(config-get api-listening-port)/v1/\$(tenant_id)s"
    local r_id=""
    for r_id in $(relation-ids identity-service); do
      relation-set -r $r_id service="cinder" \
        region="$(config-get region)" \
        public_url="$url" admin_url="$url" internal_url="$url"
    done
  fi
}
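
# config-changed: refresh the https configuration and persist environment
# variables used by external health-check scripts.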
config_changed() {
  configure_https
  # Save our scriptrc env variables for health checks.
  declare -a env_vars=(
    "OPENSTACK_PORT_MCASTPORT=$(config-get ha-mcastport)"
    'OPENSTACK_SERVICE_API=cinder-api'
    'OPENSTACK_SERVICE_SCHEDULER=cinder-scheduler'
    'OPENSTACK_SERVICE_VOLUME=cinder-volume')
  save_script_rc ${env_vars[@]}
}
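
# image-service relation changed: point cinder at the glance API server and
# install qemu-utils for image conversion.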
image-service_changed() {
  GLANCE_API_SERVER=$(relation-get glance-api-server)
  if [[ -z "$GLANCE_API_SERVER" ]] ; then
    echo "image-service_changed: GLANCE_API_SERVER not yet set. Exit 0 and retry"
    exit 0
  fi
  set_or_update glance_api_servers "$GLANCE_API_SERVER"
  apt-get -y install qemu-utils
  cinder_ctl all restart
}
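
# Hooks are symlinks to this script; dispatch on the hook name.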
arg0=$(basename $0)
juju-log "cinder: Attempting to fire hook for: $arg0"
case $arg0 in
  "install") install_hook ;;
  "start") cinder_ctl all start ;;
  "stop") cinder_ctl all stop ;;
  "shared-db-relation-joined") db_joined ;;
  "shared-db-relation-changed") db_changed ;;
  "amqp-relation-joined") amqp_joined ;;
  "amqp-relation-changed") amqp_changed ;;
  "identity-service-relation-joined") keystone_joined ;;
  "identity-service-relation-changed") keystone_changed ;;
  "ceph-relation-joined") ceph_joined ;;
  "ceph-relation-changed") ceph_changed ;;
  "cinder-volume-service-relation-joined") exit 0 ;;
  "cinder-volume-service-relation-changed") exit 0 ;;
  "cluster-relation-changed") cluster_changed ;;
  "cluster-relation-departed") cluster_changed ;;
  "image-service-relation-changed") image-service_changed ;;
  "ha-relation-joined") ha_relation_joined ;;
  "ha-relation-changed") ha_relation_changed ;;
  "upgrade-charm") upgrade_charm ;;
  "config-changed") config_changed ;;
  *) exit 0 ;;
esac