#!/bin/bash -e

# nova-cloud-controller charm hooks.  This file is linked under one name per
# hook; dispatch on the invoked basename happens at the bottom of the file.

CHARM_DIR=$(dirname "$0")
arg0=$(basename "$0")

if [[ -e "$CHARM_DIR/nova-cloud-controller-common" ]] ; then
  . "$CHARM_DIR/nova-cloud-controller-common"
else
  # FIX: previously only logged and carried on; without the common library
  # every helper below is undefined, so fail fast instead.
  juju-log "ERROR: Could not load nova-cloud-controller-common from $CHARM_DIR"
  exit 1
fi
|
|
|
|
function install_hook {
  # Install-time setup: configure the apt source selected by the
  # openstack-origin option, install the nova packages, configure the
  # network manager, open API ports and stop services until relations
  # provide configuration.  Helpers (determine_packages, service_ctl,
  # configure_install_source, ...) come from nova-cloud-controller-common,
  # which also sets $CHARM, $NET_MANAGER and $PACKAGES.
  juju-log "$CHARM: Installing nova packages"
  # needed for add-apt-repository used by configure_install_source
  apt-get -y install python-software-properties || exit 1
  configure_install_source "$(config-get openstack-origin)"
  apt-get update || exit 1

  # determine_packages populates $PACKAGES; word-splitting below is intentional.
  determine_packages
  DEBIAN_FRONTEND=noninteractive apt-get -y \
  install --no-install-recommends $PACKAGES || exit 1

  if [[ "$NET_MANAGER" == "Quantum" ]] ; then
    determine_quantum_config
  fi
  configure_network_manager $NET_MANAGER

  # Configure any flags specified in deployment config
  set_config_flags

  # Open up the various API endpoints
  # EC2
  open-port 8773
  # osapi-compute
  open-port 8774
  # object-store / s3
  open-port 3333
  # Quantum API if configured
  if [ "$NET_MANAGER" == "Quantum" ]; then
    open-port 9696
  fi

  # Helpers for creating external and tenant networks
  cp files/create_ext_net.py /usr/bin/quantum-ext-net
  cp files/create_tenant_net.py /usr/bin/quantum-tenant-net

  # Services stay down until amqp/db/identity relations configure them.
  service_ctl all stop
  configure_https
}
|
|
|
|
function upgrade_charm {
  # Charm upgrade: re-run the install hook to refresh payload files and
  # package state, then bring all managed services back up.
  install_hook
  service_ctl all start
}
|
|
|
|
function config_changed {
  # Determine whether or not we should do an upgrade, based on whether or not
  # the version offered in openstack-origin is greater than what is installed.
  local install_src=$(config-get openstack-origin)
  local cur=$(get_os_codename_package "nova-common")
  local available=$(get_os_codename_install_source "$install_src")

  if dpkg --compare-versions $(get_os_version_codename "$cur") lt \
     $(get_os_version_codename "$available") ; then
    juju-log "$CHARM: Upgrading OpenStack release: $cur -> $available."
    determine_packages
    do_openstack_upgrade "$install_src" $PACKAGES
  fi

  set_config_flags

  # FIX: was "[ Quantum ] && eligible_leader || ! is_clustered", which due to
  # &&/|| left-to-right evaluation ran configure_quantum_networking on ANY
  # non-clustered deployment, even without Quantum.  Group the leader/cluster
  # test so it only applies when the network manager is Quantum.
  if [[ "$NET_MANAGER" == "Quantum" ]] && \
     { eligible_leader 'res_nova_vip' || ! is_clustered; } ; then
    configure_quantum_networking
  fi

  determine_services
  service_ctl all restart
  configure_https
}
|
|
|
|
function amqp_joined {
  # Request an AMQP account from the rabbitmq-server charm.  The broker
  # creates the user and answers in the -changed hook with its address and
  # password; configuration of nova.conf happens there.
  local user vhost
  user=$(config-get rabbit-user)
  vhost=$(config-get rabbit-vhost)
  juju-log "$CHARM - amqp_joined: requesting credentials for $user"
  relation-set username=$user
  relation-set vhost=$vhost
}
|
|
|
|
function amqp_changed {
  # The rabbitmq-server charm created our credentials and tells us where to
  # connect; write the broker settings into nova.conf (and quantum.conf when
  # Quantum is the network manager), then restart services.
  local rabbit_host=$(relation-get private-address)
  local rabbit_password=$(relation-get password)

  if [[ -z $rabbit_host ]] || \
     [[ -z $rabbit_password ]] ; then
    juju-log "$CHARM - amqp_changed: rabbit_host||rabbit_password not set."
    exit 0
  fi

  # if the rabbitmq service is clustered among nodes with hacluster,
  # point to its vip instead of its private-address.
  local clustered=$(relation-get clustered)
  if [[ -n "$clustered" ]] ; then
    # FIX: log prefix previously read "ampq_changed".
    juju-log "$CHARM - amqp_changed: Configuring for "\
             "access to haclustered rabbitmq service."
    local vip=$(relation-get vip)
    [[ -z "$vip" ]] && juju-log "$CHARM - amqp_changed: Clustered but no vip."\
      && exit 0
    rabbit_host="$vip"
  fi

  local rabbit_user=$(config-get rabbit-user)
  local rabbit_vhost=$(config-get rabbit-vhost)
  juju-log "$CHARM - amqp_changed: Setting rabbit config in nova.conf: " \
           "$rabbit_user@$rabbit_host/$rabbit_vhost"
  set_or_update rabbit_host $rabbit_host
  set_or_update rabbit_userid $rabbit_user
  set_or_update rabbit_password $rabbit_password
  set_or_update rabbit_virtual_host $rabbit_vhost

  if [ "$(config-get network-manager)" == "Quantum" ]; then
    # quantum-server reads its own config file; mirror the settings there.
    set_or_update rabbit_host "$rabbit_host" "$QUANTUM_CONF"
    set_or_update rabbit_userid "$rabbit_user" "$QUANTUM_CONF"
    set_or_update rabbit_password "$rabbit_password" "$QUANTUM_CONF"
    set_or_update rabbit_virtual_host "$rabbit_vhost" "$QUANTUM_CONF"
  fi

  determine_services && service_ctl all restart
}
|
|
|
|
function db_joined {
  # Tell the mysql provider which database(s) we want.  It creates them and
  # hands back credentials in the -changed hook.
  local database username host
  database=$(config-get nova-db)
  username=$(config-get db-user)
  host=$(unit-get private-address)
  juju-log "$CHARM - db_joined: requesting database access to $database for "\
           "$username@$host"
  relation-set nova_database=$database nova_username=$username nova_hostname=$host
  if [[ "$NET_MANAGER" == "Quantum" ]]; then
    # Quantum keeps its state in a separate database.
    relation-set quantum_database=quantum quantum_username=quantum quantum_hostname=$host
  fi
}
|
|
|
|
function db_changed {
|
|
local db_host=`relation-get private-address`
|
|
local db_password=`relation-get nova_password`
|
|
|
|
if [[ -z $db_host ]] || [[ -z $db_password ]] ; then
|
|
juju-log "$CHARM - db_changed: db_host||db_password set, will retry."
|
|
exit 0
|
|
fi
|
|
|
|
local nova_db=$(config-get nova-db)
|
|
local db_user=$(config-get db-user)
|
|
juju-log "$CHARM - db_changed: Configuring nova.conf for access to $nova_db"
|
|
|
|
set_or_update sql_connection "mysql://$db_user:$db_password@$db_host/$nova_db"
|
|
|
|
if [ "$NET_MANAGER" == "Quantum" ]; then
|
|
local quantum_db_password=`relation-get quantum_password`
|
|
determine_quantum_config
|
|
set_or_update sql_connection "mysql://quantum:$quantum_db_password@$db_host/quantum?charset=utf8" \
|
|
$QUANTUM_PLUGIN_CONF "DATABASE"
|
|
fi
|
|
determine_services
|
|
service_ctl all stop
|
|
|
|
eligible_leader 'res_nova_vip' && /usr/bin/nova-manage db sync
|
|
|
|
service_ctl all start
|
|
if [ "$NET_MANAGER" == "Quantum" ]; then
|
|
configure_quantum_networking
|
|
fi
|
|
trigger_remote_service_restarts
|
|
}
|
|
|
|
function image-service_changed {
  # Glance has published its API endpoint; record it in nova.conf and
  # switch nova to the glance image service.
  local api_server
  api_server=$(relation-get glance-api-server)
  if [[ -z $api_server ]]; then
    juju-log "$CHARM - image-service_changed: Peer not ready?"
    exit 0
  fi
  set_or_update glance_api_servers $api_server
  set_or_update image_service "nova.image.glance.GlanceImageService"
  determine_services && service_ctl all restart
}
|
|
|
|
function keystone_joined {
  # we need to get two entries into keystone's catalog, nova + ec2 (plus s3,
  # quantum and nova-volume as configured).  Settings are grouped by
  # prepending $service_ to each key; the keystone charm assembles them into
  # corresponding catalog entries.  Only the leader publishes.
  eligible_leader 'res_nova_vip' || return 0

  is_clustered && local host=$(config-get vip) ||
    local host=$(unit-get private-address)
  https && local scheme="https" || local scheme="http"

  local nova_url="$scheme://$host:8774/v1.1/\$(tenant_id)s"
  local ec2_url="$scheme://$host:8773/services/Cloud"
  local s3_url="$scheme://$host:3333"
  local region="$(config-get region)"
  local quantum_url="$scheme://$host:9696"

  # these are the default endpoints
  relation-set nova_service="nova" \
    nova_region="$region" \
    nova_public_url="$nova_url" \
    nova_admin_url="$nova_url" \
    nova_internal_url="$nova_url" \
    ec2_service="ec2" \
    ec2_region="$region" \
    ec2_public_url="$ec2_url" \
    ec2_admin_url="$ec2_url" \
    ec2_internal_url="$ec2_url" \
    s3_service="s3" \
    s3_region="$region" \
    s3_public_url="$s3_url" \
    s3_admin_url="$s3_url" \
    s3_internal_url="$s3_url"

  if [ "$(config-get network-manager)" == "Quantum" ]; then
    relation-set quantum_service="quantum" \
      quantum_region="$region" \
      quantum_public_url="$quantum_url" \
      quantum_admin_url="$quantum_url" \
      quantum_internal_url="$quantum_url"
  fi

  # tack on an endpoint for nova-volume if a relation exists.
  if [[ -n "$(relation-ids nova-volume-service)" ]] ; then
    # FIX: URL was built with undefined $vol_port, yielding "...host:/v1/...".
    # The nova-volume API listens on 8776 (matches volume_joined and
    # ha_relation_changed).
    nova_vol_url="$scheme://$host:8776/v1/\$(tenant_id)s"
    relation-set nova-volume_service="nova-volume" \
      nova-volume_region="$region" \
      nova-volume_public_url="$nova_vol_url" \
      nova-volume_admin_url="$nova_vol_url" \
      nova-volume_internal_url="$nova_vol_url"
  fi
}
|
|
|
|
function keystone_changed {
|
|
token=$(relation-get admin_token)
|
|
service_port=$(relation-get service_port)
|
|
auth_port=$(relation-get auth_port)
|
|
service_username=$(relation-get service_username)
|
|
service_password=$(relation-get service_password)
|
|
service_tenant=$(relation-get service_tenant)
|
|
region=$(config-get region)
|
|
|
|
[[ -z "$token" ]] || [[ -z "$service_port" ]] || [[ -z "$auth_port" ]] ||
|
|
[[ -z "$service_username" ]] || [[ -z "$service_password" ]] ||
|
|
[[ -z "$service_tenant" ]] &&
|
|
juju-log "$CHARM - keystone_changed: Peer not ready" && exit 0
|
|
|
|
[[ "$token" == "-1" ]] &&
|
|
juju-log "$CHARM - keystone_changed: admin token error" && exit 1
|
|
|
|
# No need to update paste deploy pipelines, just set a flag in nova.conf
|
|
set_or_update "auth_strategy" "keystone"
|
|
|
|
# Update keystone authentication configuration
|
|
service_host=$(relation-get service_host)
|
|
auth_host=$(relation-get auth_host)
|
|
set_or_update "keystone_ec2_url" "http://$service_host:$service_port/v2.0/ec2tokens"
|
|
|
|
if grep -q use_deprecated_auth $NOVA_CONF ; then
|
|
juju-log "$CHARM - keystone_changed: Disabling '--use_deprecated_auth"
|
|
sed -i '/--use_deprecated_auth/d' $NOVA_CONF
|
|
fi
|
|
|
|
# update keystone authtoken settings accordingly
|
|
set_or_update "service_host" "$service_host" "$API_CONF"
|
|
set_or_update "service_port" "$service_port" "$API_CONF"
|
|
set_or_update "auth_host" "$auth_host" "$API_CONF"
|
|
set_or_update "auth_port" "$auth_port" "$API_CONF"
|
|
# XXX http hard-coded
|
|
set_or_update "auth_uri" "http://$service_host:$service_port/" "$API_CONF"
|
|
set_or_update "admin_token" "$token" "$API_CONF"
|
|
set_or_update "admin_tenant_name" "$service_tenant" "$API_CONF"
|
|
set_or_update "admin_user" "$service_username" "$API_CONF"
|
|
set_or_update "admin_password" "$service_password" "$API_CONF"
|
|
|
|
if [ "$NET_MANAGER" == "Quantum" ]; then
|
|
# Configure Nova for quantum
|
|
keystone_url="http://${auth_host}:${auth_port}/v2.0"
|
|
set_or_update "quantum_url" "http://$(unit-get private-address):9696"
|
|
set_or_update "quantum_admin_tenant_name" "${service_tenant}"
|
|
set_or_update "quantum_admin_username" "${service_username}"
|
|
set_or_update "quantum_admin_password" "${service_password}"
|
|
set_or_update "quantum_admin_auth_url" "${keystone_url}"
|
|
# Configure API server for quantum
|
|
set_or_update "admin_tenant_name" "$service_tenant" "$QUANTUM_API_CONF" "filter:authtoken"
|
|
set_or_update "admin_user" "$service_username" "$QUANTUM_API_CONF" "filter:authtoken"
|
|
set_or_update "admin_password" "$service_password" "$QUANTUM_API_CONF" "filter:authtoken"
|
|
set_or_update "auth_host" "$auth_host" "$QUANTUM_API_CONF" "filter:authtoken"
|
|
set_or_update "auth_port" "$auth_port" "$QUANTUM_API_CONF" "filter:authtoken"
|
|
# Save a local copy of the credentials for later use
|
|
cat > /etc/quantum/novarc << EOF
|
|
export OS_USERNAME=${service_username}
|
|
export OS_PASSWORD=${service_password}
|
|
export OS_TENANT_NAME=${service_tenant}
|
|
export OS_AUTH_URL=${keystone_url}
|
|
export OS_REGION_NAME=$region
|
|
EOF
|
|
fi
|
|
|
|
determine_services && service_ctl all restart
|
|
|
|
if [ "$NET_MANAGER" == "Quantum" ]; then
|
|
configure_quantum_networking
|
|
# ripple out changes to identity to connected services
|
|
# which use cloud-controller as source of information for
|
|
# keystone
|
|
local r_ids="$(relation-ids cloud-compute) $(relation-ids quantum-network-service)"
|
|
for id in $r_ids ; do
|
|
relation-set -r $id \
|
|
keystone_host=$auth_host \
|
|
auth_port=$auth_port \
|
|
service_port=$service_port \
|
|
service_username=$service_username \
|
|
service_password=$service_password \
|
|
service_tenant=$service_tenant \
|
|
region=$region \
|
|
# XXX http hard-coded
|
|
auth_uri="http://$service_host:$service_port/"
|
|
|
|
done
|
|
fi
|
|
configure_https
|
|
}
|
|
|
|
volume_joined() {
  # A volume service has related to us; which flavor (cinder vs. legacy
  # nova-volume) is determined by the hook name we were invoked under.
  local svc=""
  case "$arg0" in
    "cinder-volume-service-relation-joined") svc="cinder" ;;
    "nova-volume-service-relation-joined") svc="nova-volume" ;;
    *) svc="nova-volume" ;;
  esac

  # nova-volume only exists on essex/folsom; ignore the relation elsewhere.
  local cur_vers=$(get_os_codename_package "nova-common")
  if [[ "$cur_vers" != "essex" ]] && [[ "$cur_vers" != "folsom" ]] &&
     [[ "$svc" == "nova-volume" ]] ; then
    juju-log "$CHARM: WARNING nova-volume is only supported on Essex "\
             "and Folsom. Ignoring new relation to nova-volume service."
    exit 0
  fi

  configure_volume_service "$svc"
  # FIX: was misspelled "deteremine_services", so the command always failed
  # and services were never restarted after configuration.
  determine_services && service_ctl all restart

  # The nova-volume API can be hosted here alongside the other
  # nova API services, but there needs to be a new endpoint
  # configured in keystone.
  if [[ "$svc" == "nova-volume" ]] ; then
    apt-get -y install nova-api-os-volume
    local nova_vol_url="http://$(unit-get private-address):8776/v1/\$(tenant_id)s"
    local r_ids=$(relation-ids identity-service)
    for id in $r_ids ; do
      juju-log "$CHARM: Registering new endpoint for nova-volume API on "\
               "existing identity-service relation: $id"
      nova_vol_url="http://$(unit-get private-address):8776/v1/\$(tenant_id)s"
      relation-set -r $id nova-volume_service="nova-volume" \
        nova-volume_region="$(config-get region)" \
        nova-volume_public_url="$nova_vol_url" \
        nova-volume_admin_url="$nova_vol_url" \
        nova-volume_internal_url="$nova_vol_url"
    done
  fi

  if [[ "$svc" == "cinder" ]] ; then
    # Compute nodes need to be notified to set their volume
    # driver accordingly.
    r_ids=$(relation-ids cloud-compute)
    for id in $r_ids ; do
      relation-set -r $id volume_service="cinder"
    done
  fi
}
|
|
|
|
compute_joined() {
  # A nova-compute unit has joined: hand it the network manager, EC2 host,
  # keystone/quantum details, CA cert and the volume driver to use.
  local r_id="$1"
  [[ -n "$r_id" ]] && r_id="-r $r_id"
  eligible_leader 'res_nova_vip' || return 0
  relation-set $r_id network_manager=$(config-get network-manager)
  # XXX Should point to VIP if clustered, or this may not even be needed.
  relation-set $r_id ec2_host=$(unit-get private-address)

  local sect="filter:authtoken"
  keystone_host=$(local_config_get $API_CONF auth_host $sect)

  if [ "$NET_MANAGER" == "Quantum" ]; then
    if [[ -n "$keystone_host" ]]; then
      relation-set $r_id \
        keystone_host=$keystone_host \
        auth_port=$(local_config_get $API_CONF auth_port $sect) \
        service_port=$(local_config_get $API_CONF service_port $sect) \
        service_username=$(local_config_get $API_CONF admin_user $sect) \
        service_password=$(local_config_get $API_CONF admin_password $sect) \
        service_tenant=$(local_config_get $API_CONF admin_tenant_name $sect) \
        auth_uri=$(local_config_get $API_CONF auth_uri $sect)
    fi
    is_clustered && local host=$(config-get vip) ||
      local host=$(unit-get private-address)
    https && local scheme="https" || local scheme="http"
    # FIX: was "$scheme:$host:9696" (missing //), producing an unusable URL
    # like "http:10.0.0.1:9696".
    local quantum_url="$scheme://$host:9696"

    relation-set $r_id quantum_url=$quantum_url \
      quantum_plugin=$(config-get quantum-plugin) \
      region=$(config-get region)
  fi

  # must pass on the keystone CA certificate, if it exists.
  cert="/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt"
  if [[ -n "$keystone_host" ]] &&
     [[ -e $cert ]] ;
  then
    cert=$(cat $cert | base64)
    relation-set $r_id ca_cert="$cert"
  fi

  # volume driver is dependent on os version, or presence
  # of cinder (on folsom, at least)
  local cur_vers=$(get_os_codename_package "nova-common")
  local vol_drv="cinder"
  case "$cur_vers" in
    "essex")
      vol_drv="nova-volume"
      ;;
    "folsom")
      [[ -z "$(relation-ids cinder-volume-service)" ]] && vol_drv="nova-volume"
      ;;
  esac
  relation-set $r_id volume_service="$vol_drv"
}
|
|
|
|
compute_changed() {
  # A compute peer has published its live-migration auth type; act on it.
  local migration_auth
  migration_auth="$(relation-get migration_auth_type)"
  if [[ -z "$migration_auth" ]]; then
    juju-log "$CHARM: compute_changed - Peer not ready or "\
             "no migration auth. configured."
    exit 0
  fi

  case "$migration_auth" in
    "ssh") ssh_compute add ;;
  esac
}
|
|
|
|
compute_departed() {
  # A compute unit is leaving the relation.  ssh_compute comes from the
  # common file; 'remove' is presumably the inverse of the 'ssh_compute add'
  # done in compute_changed (pruning that unit's SSH key material) —
  # confirm against nova-cloud-controller-common.
  ssh_compute remove
}
|
|
|
|
function quantum_joined() {
  # Tell quantum service about keystone.  Only the leader publishes.
  # FIX: eligible_leader was called with no argument here; every other call
  # site passes the managed resource name 'res_nova_vip'.
  eligible_leader 'res_nova_vip' || return 0

  local sect="filter:authtoken"
  keystone_host=$(local_config_get $API_CONF auth_host $sect)
  if [ -n "$keystone_host" ]; then
    relation-set \
      keystone_host=$keystone_host \
      auth_port=$(local_config_get $API_CONF auth_port $sect) \
      service_port=$(local_config_get $API_CONF service_port $sect) \
      service_username=$(local_config_get $API_CONF admin_user $sect) \
      service_password=$(local_config_get $API_CONF admin_password $sect) \
      service_tenant=$(local_config_get $API_CONF admin_tenant_name $sect) \
      auth_uri=$(local_config_get $API_CONF auth_uri $sect)
  fi

  is_clustered && local host=$(config-get vip) ||
    local host=$(unit-get private-address)
  https && local scheme="https" || local scheme="http"
  # FIX: was "$scheme:$host:9696" (missing //).
  local quantum_url="$scheme://$host:9696"

  # NOTE(review): an unset $r_id was previously interpolated here; dropped —
  # relation-set operates on the current relation by default in this hook.
  relation-set quantum_url=$quantum_url \
    quantum_plugin=$(config-get quantum-plugin) \
    region=$(config-get region)
}
|
|
|
|
function cluster_changed() {
  # Peers changed: move each API service onto an internal listen port and
  # rebuild the haproxy frontends that expose the default ports.
  [[ -z "$(peer_units)" ]] &&
    juju-log "cluster_changed() with no peers." && exit 0

  # upstartService:defaultPort:configOption
  local svcs="nova-api-ec2:8773:ec2_listen_port
              nova-api-os-compute:8774:osapi_compute_listen_port
              nova-objectstore:3333:s3_listen_port"
  [[ "$NET_MANAGER" == "Quantum" ]] &&
    svcs="$svcs quantum-server:9696:bind_port"

  local entry service port opt next_server api_port
  local haproxy_port_maps=""
  for entry in $svcs ; do
    IFS=: read -r service port opt <<< "$entry"
    next_server="$(determine_haproxy_port $port)"
    api_port="$(determine_api_port $port)"
    haproxy_port_maps="$haproxy_port_maps $service:$next_server:$api_port"
    set_or_update "$opt" "$api_port"
    service_ctl $service restart
  done
  configure_haproxy $haproxy_port_maps
}
|
|
|
|
function ha_relation_joined() {
  # Hand the hacluster charm the pacemaker resource definitions needed to
  # manage the VIP and a cloned haproxy for this service.
  local corosync_bindiface=$(config-get ha-bindiface)
  local corosync_mcastport=$(config-get ha-mcastport)
  local vip=$(config-get vip)
  local vip_iface=$(config-get vip_iface)
  local vip_cidr=$(config-get vip_cidr)
  if [ -n "$vip" ] && [ -n "$vip_iface" ] && \
     [ -n "$vip_cidr" ] && [ -n "$corosync_bindiface" ] && \
     [ -n "$corosync_mcastport" ]; then
    # TODO: This feels horrible but the data required by the hacluster
    # charm is quite complex and is python ast parsed.
    resources="{
'res_nova_vip':'ocf:heartbeat:IPaddr2',
'res_nova_haproxy':'lsb:haproxy'
}"
    resource_params="{
'res_nova_vip': 'params ip=\"$vip\" cidr_netmask=\"$vip_cidr\" nic=\"$vip_iface\"',
'res_nova_haproxy': 'op monitor interval=\"5s\"'
}"
    init_services="{
'res_nova_haproxy':'haproxy'
}"
    clones="{
'cl_nova_haproxy':'res_nova_haproxy'
}"
    # FIX: clones was previously sent as "$groups" — an unset variable — so
    # the hacluster charm never received the clone definition above.
    relation-set corosync_bindiface=$corosync_bindiface \
      corosync_mcastport=$corosync_mcastport \
      resources="$resources" resource_params="$resource_params" \
      init_services="$init_services" clones="$clones"
  else
    juju-log "Insufficient configuration data to configure hacluster"
    exit 1
  fi
}
|
|
|
|
function ha_relation_changed() {
  # Once hacluster reports the cluster is up, re-advertise every catalog
  # endpoint using the VIP instead of the unit's private address.  Only the
  # current holder of res_nova_vip performs the update.
  local clustered=`relation-get clustered`
  if [ -n "$clustered" ] && is_leader 'res_nova_vip'; then
    https && local scheme="https" || local scheme="http"
    for r_id in `relation-ids identity-service`; do
      # Rebuild all service URLs against the VIP.
      local address=$(config-get vip)
      local nova_url="$scheme://$address:8774/v1.1/\$(tenant_id)s"
      local ec2_url="$scheme://$address:8773/services/Cloud"
      local s3_url="$scheme://$address:3333"
      local quantum_url="$scheme://$address:9696"
      local nova_vol_url="$scheme://$address:8776/v1/\$(tenant_id)s"

      relation-set -r $r_id \
        nova_public_url="$nova_url" \
        nova_admin_url="$nova_url" \
        nova_internal_url="$nova_url" \
        ec2_public_url="$ec2_url" \
        ec2_admin_url="$ec2_url" \
        ec2_internal_url="$ec2_url" \
        s3_public_url="$s3_url" \
        s3_admin_url="$s3_url" \
        s3_internal_url="$s3_url"

      if [ "$(config-get network-manager)" == "Quantum" ]; then
        relation-set -r $r_id \
          quantum_public_url="$quantum_url" \
          quantum_admin_url="$quantum_url" \
          quantum_internal_url="$quantum_url"
      fi

      if [[ -n "$(relation-ids nova-volume-service)" ]] ; then
        relation-set -r $r_id \
          nova-volume_public_url="$nova_vol_url" \
          nova-volume_admin_url="$nova_vol_url" \
          nova-volume_internal_url="$nova_vol_url"
      fi
    done
    if [ "$(config-get network-manager)" == "Quantum" ]; then
      # Let gateway nodes use the new HA address for the
      # quantum API server
      # NOTE(review): relies on $quantum_url surviving past the loop above;
      # 'local' is function-scoped, so this only works when the
      # identity-service loop ran at least once — confirm intended.
      for r_id in `relation-ids quantum-network-service`; do
        relation-set -r $r_id \
          quantum_url="$quantum_url"
      done
    fi
  fi
}
|
|
|
|
# Hook dispatch: this file is installed under one symlink per hook; route on
# the basename we were invoked as.  Unknown hook names are a silent no-op.
arg0=$(basename $0)
case $arg0 in
  "start"|"stop") determine_services ; service_ctl all $arg0 ;;
  "install") install_hook ;;
  "config-changed") config_changed ;;
  "upgrade-charm") upgrade_charm ;;
  "amqp-relation-joined") amqp_joined ;;
  "amqp-relation-changed") amqp_changed ;;
  "shared-db-relation-joined") db_joined ;;
  "shared-db-relation-changed") db_changed ;;
  "image-service-relation-joined") exit 0 ;;
  "image-service-relation-changed") image-service_changed ;;
  "identity-service-relation-joined") keystone_joined ;;
  "identity-service-relation-changed") keystone_changed ;;
  "cinder-volume-service-relation-joined"|\
  "nova-volume-service-relation-joined") volume_joined ;;
  "cloud-compute-relation-joined") compute_joined ;;
  "cloud-compute-relation-changed") compute_changed ;;
  "cloud-compute-relation-departed") compute_departed ;;
  "quantum-network-service-relation-joined") quantum_joined ;;
  "cluster-relation-changed"|"cluster-relation-departed") cluster_changed ;;
  "ha-relation-joined") ha_relation_joined ;;
  "ha-relation-changed") ha_relation_changed ;;
  *) exit 0 ;;
esac
|