Drop all bash, prepare for rewrite.

Adam Gandelman 2013-08-01 20:41:51 -07:00
parent 42861f28f6
commit 025922d5dd
34 changed files with 0 additions and 2269 deletions

@ -1 +0,0 @@
nova-cloud-controller-relations

@ -1 +0,0 @@
nova-cloud-controller-relations

@ -1 +0,0 @@
nova-cloud-controller-relations

@ -1 +0,0 @@
nova-cloud-controller-relations

@ -1 +0,0 @@
nova-cloud-controller-relations

@ -1 +0,0 @@
nova-cloud-controller-relations

@ -1 +0,0 @@
nova-cloud-controller-relations

@ -1 +0,0 @@
nova-cloud-controller-relations

@ -1 +0,0 @@
nova-cloud-controller-relations

@ -1 +0,0 @@
nova-cloud-controller-relations

@ -1 +0,0 @@
nova-cloud-controller-relations

@ -1 +0,0 @@
nova-cloud-controller-relations

@ -1 +0,0 @@
nova-cloud-controller-relations

@ -1 +0,0 @@
nova-cloud-controller-relations

@ -1 +0,0 @@
nova-cloud-controller-relations

@ -1 +0,0 @@
nova-cloud-controller-relations

@ -1,43 +0,0 @@
#!/bin/bash -e
# Essex-specific functions
nova_set_or_update() {
# Set a config option in nova.conf or api-paste.ini, depending on $conf_file.
# Defaults to updating nova.conf.
local key=$1
local value=$2
local conf_file=$3
local pattern=""
local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf}
local api_conf=${API_CONF:-/etc/nova/api-paste.ini}
local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf}
[[ -z $key ]] && juju-log "$CHARM set_or_update: value $value missing key" && exit 1
[[ -z $value ]] && juju-log "$CHARM set_or_update: key $key missing value" && exit 1
[[ -z "$conf_file" ]] && conf_file=$nova_conf
case "$conf_file" in
"$nova_conf") match="\-\-$key="
pattern="--$key="
out=$pattern
;;
"$api_conf"|"$libvirtd_conf") match="^$key = "
pattern="$match"
out="$key = "
;;
*) error_out "ERROR: set_or_update: Invalid conf_file ($conf_file)"
esac
cat $conf_file | grep "$match$value" >/dev/null &&
juju-log "$CHARM: $key=$value already in set in $conf_file" \
&& return 0
if cat $conf_file | grep "$match" >/dev/null ; then
juju-log "$CHARM: Updating $conf_file, $key=$value"
sed -i "s|\($pattern\).*|\1$value|" $conf_file
else
juju-log "$CHARM: Setting new option $key=$value in $conf_file"
echo "$out$value" >>$conf_file
fi
CONFIG_CHANGED=True
}
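
For reference, a hypothetical call sequence (keys and values are illustrative; assumes the usual hook environment with juju-log available):
# Essex gflags style: the nova.conf entry becomes --verbose=True
nova_set_or_update "verbose" "True"
# Explicit target: the api-paste.ini entry becomes admin_user = nova
nova_set_or_update "admin_user" "nova" "/etc/nova/api-paste.ini"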

@ -1,135 +0,0 @@
#!/bin/bash -e
# Folsom-specific functions
nova_set_or_update() {
# Set a config option in nova.conf or api-paste.ini, depending on $conf_file.
# Defaults to updating nova.conf.
local key="$1"
local value="$2"
local conf_file="$3"
local section="${4:-DEFAULT}"
local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf}
local api_conf=${API_CONF:-/etc/nova/api-paste.ini}
local quantum_conf=${QUANTUM_CONF:-/etc/quantum/quantum.conf}
local quantum_api_conf=${QUANTUM_API_CONF:-/etc/quantum/api-paste.ini}
local quantum_plugin_conf=${QUANTUM_PLUGIN_CONF:-/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini}
local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf}
[[ -z $key ]] && juju-log "$CHARM: set_or_update: value $value missing key" && exit 1
[[ -z $value ]] && juju-log "$CHARM: set_or_update: key $key missing value" && exit 1
[[ -z "$conf_file" ]] && conf_file=$nova_conf
local pattern=""
case "$conf_file" in
"$nova_conf") match="^$key="
pattern="$key="
out=$pattern
;;
"$api_conf"|"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf"| \
"$libvirtd_conf")
match="^$key = "
pattern="$match"
out="$key = "
;;
*) juju-log "$CHARM ERROR: set_or_update: Invalid conf_file ($conf_file)"
esac
cat $conf_file | grep "$match$value" >/dev/null &&
juju-log "$CHARM: $key=$value already in set in $conf_file" \
&& return 0
case $conf_file in
"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf")
python -c "
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('$conf_file')
config.set('$section','$key','$value')
with open('$conf_file', 'wb') as configfile:
    config.write(configfile)
"
;;
*)
if cat $conf_file | grep "$match" >/dev/null ; then
juju-log "$CHARM: Updating $conf_file, $key=$value"
sed -i "s|\($pattern\).*|\1$value|" $conf_file
else
juju-log "$CHARM: Setting new option $key=$value in $conf_file"
echo "$out$value" >>$conf_file
fi
;;
esac
CONFIG_CHANGED="True"
}
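
A sketch of the section-aware path above (values illustrative): quantum-family files are routed through ConfigParser, so the key lands in the named section rather than being appended flat:
# Writes local_ip = 10.0.0.10 under [OVS] in the OVS plugin config.
nova_set_or_update "local_ip" "10.0.0.10" \
"/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini" "OVS"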
# Upgrade Helpers
nova_pre_upgrade() {
# Pre-upgrade helper. Caller should pass the version of OpenStack we are
# upgrading from.
return 0 # Nothing to do here, yet.
}
nova_post_upgrade() {
# Post-upgrade helper. Caller should pass the version of OpenStack we are
# upgrading from.
local upgrade_from="$1"
juju-log "$CHARM: Running post-upgrade hook: $upgrade_from -> folsom."
# We only support essex -> folsom, currently.
[[ "$upgrade_from" != "essex" ]] &&
error_out "Unsupported upgrade: $upgrade_from -> folsom"
# This may be dangerous, if we are upgrading a number of units at once
# and they all begin the same migration concurrently. Migrate only from
# the cloud controller(s).
if [[ "$CHARM" == "nova-cloud-controller" ]] ; then
juju-log "$CHARM: Migrating nova database."
/usr/bin/nova-manage db sync
# Trigger a service restart on all other nova nodes.
trigger_remote_service_restarts
fi
# Packaging currently takes care of converting the Essex gflags format
# to .ini, but we need to update the api-paste.ini manually. It can be
# updated directly from keystone, via the identity-service relation,
# if it exists. Only services that require keystone credentials will
# have modified api-paste.ini, and only those services will have a .dpkg-dist
# version present.
local r_id=$(relation-ids identity-service)
if [[ -n "$r_id" ]] && [[ -e "$CONF_DIR/api-paste.ini.dpkg-dist" ]] ; then
# Backup the last api config, update the stock packaged version
# with our current Keystone info.
mv $API_CONF $CONF_DIR/api-paste.ini.juju-last
mv $CONF_DIR/api-paste.ini.dpkg-dist $CONF_DIR/api-paste.ini
unit=$(relation-list -r $r_id | head -n1)
# Note, this should never be called from a relation hook, only config-changed.
export JUJU_REMOTE_UNIT=$unit
service_port=$(relation-get -r $r_id service_port)
auth_port=$(relation-get -r $r_id auth_port)
service_username=$(relation-get -r $r_id service_username)
service_password=$(relation-get -r $r_id service_password)
service_tenant=$(relation-get -r $r_id service_tenant)
keystone_host=$(relation-get -r $r_id private-address)
unset JUJU_REMOTE_UNIT
juju-log "$CHARM: Updating new api-paste.ini with keystone data from $unit:$r_id"
set_or_update "service_host" "$keystone_host" "$API_CONF"
set_or_update "service_port" "$service_port" "$API_CONF"
set_or_update "auth_host" "$keystone_host" "$API_CONF"
set_or_update "auth_port" "$auth_port" "$API_CONF"
set_or_update "auth_uri" "http://$keystone_host:$service_port/" "$API_CONF"
set_or_update "admin_tenant_name" "$service_tenant" "$API_CONF"
set_or_update "admin_user" "$service_username" "$API_CONF"
set_or_update "admin_password" "$service_password" "$API_CONF"
fi
# TEMPORARY
# RC3 packaging in cloud archive doesn't have this in postinst. Do it here
sed -e "s,^root_helper=.\+,rootwrap_config=/etc/nova/rootwrap.conf," -i /etc/nova/nova.conf
juju-log "$CHARM: Post-upgrade hook complete: $upgrade_from -> folsom."
}

@ -1,97 +0,0 @@
#!/bin/bash -e
# Folsom-specific functions
nova_set_or_update() {
# TODO: This needs to be shared among folsom, grizzly and beyond.
# Set a config option in nova.conf or api-paste.ini, depending on $conf_file.
# Defaults to updating nova.conf.
local key="$1"
local value="$2"
local conf_file="$3"
local section="${4:-DEFAULT}"
local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf}
local api_conf=${API_CONF:-/etc/nova/api-paste.ini}
local quantum_conf=${QUANTUM_CONF:-/etc/quantum/quantum.conf}
local quantum_api_conf=${QUANTUM_API_CONF:-/etc/quantum/api-paste.ini}
local quantum_plugin_conf=${QUANTUM_PLUGIN_CONF:-/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini}
local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf}
[[ -z $key ]] && juju-log "$CHARM: set_or_update: value $value missing key" && exit 1
[[ -z $value ]] && juju-log "$CHARM: set_or_update: key $key missing value" && exit 1
[[ -z "$conf_file" ]] && conf_file=$nova_conf
local pattern=""
case "$conf_file" in
"$nova_conf") match="^$key="
pattern="$key="
out=$pattern
;;
"$api_conf"|"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf"| \
"$libvirtd_conf")
match="^$key = "
pattern="$match"
out="$key = "
;;
*) juju-log "$CHARM ERROR: set_or_update: Invalid conf_file ($conf_file)"
esac
cat $conf_file | grep "$match$value" >/dev/null &&
juju-log "$CHARM: $key=$value already in set in $conf_file" \
&& return 0
case $conf_file in
"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf")
python -c "
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('$conf_file')
config.set('$section','$key','$value')
with open('$conf_file', 'wb') as configfile:
    config.write(configfile)
"
;;
*)
if cat $conf_file | grep "$match" >/dev/null ; then
juju-log "$CHARM: Updating $conf_file, $key=$value"
sed -i "s|\($pattern\).*|\1$value|" $conf_file
else
juju-log "$CHARM: Setting new option $key=$value in $conf_file"
echo "$out$value" >>$conf_file
fi
;;
esac
CONFIG_CHANGED="True"
}
# Upgrade Helpers
nova_pre_upgrade() {
# Pre-upgrade helper. Caller should pass the version of OpenStack we are
# upgrading from.
return 0 # Nothing to do here, yet.
}
nova_post_upgrade() {
# Post-upgrade helper. Caller should pass the version of OpenStack we are
# upgrading from.
local upgrade_from="$1"
juju-log "$CHARM: Running post-upgrade hook: $upgrade_from -> grizzly."
# We only support folsom -> grizzly, currently.
[[ "$upgrade_from" != "folsom" ]] &&
error_out "Unsupported upgrade: $upgrade_from -> grizzly"
# This may be dangerous, if we are upgrading a number of units at once
# and they all begin the same migration concurrently. Migrate only from
# the cloud controller(s).
if [[ "$CHARM" == "nova-cloud-controller" ]] ; then
juju-log "$CHARM: Migrating nova database."
/usr/bin/nova-manage db sync
# Trigger a service restart on all other nova nodes.
trigger_remote_service_restarts
fi
juju-log "$CHARM: Post-upgrade hook complete: $upgrade_from -> grizzly."
}

@ -1,169 +0,0 @@
#!/bin/bash -e
# Common utility functions used across all nova charms.
CONFIG_CHANGED=False
HOOKS_DIR="$CHARM_DIR/hooks"
# Load the common OpenStack helper library.
if [[ -e $HOOKS_DIR/lib/openstack-common ]] ; then
. $HOOKS_DIR/lib/openstack-common
else
juju-log "Couldn't load $HOOKS_DIR/lib/opentack-common." && exit 1
fi
set_or_update() {
# Update config flags in nova.conf or api-paste.ini.
# Config layout changed in Folsom, so this is now OpenStack release specific.
local rel=$(get_os_codename_package "nova-common")
. $HOOKS_DIR/lib/nova/$rel
nova_set_or_update $@
}
function set_config_flags() {
# Set user-defined nova.conf flags from deployment config
juju-log "$CHARM: Processing config-flags."
flags=$(config-get config-flags)
if [[ "$flags" != "None" && -n "$flags" ]] ; then
for f in $(echo $flags | sed -e 's/,/ /g') ; do
k=$(echo $f | cut -d= -f1)
v=$(echo $f | cut -d= -f2)
set_or_update "$k" "$v"
done
fi
}
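
config-flags is expected to be a comma-separated list of key=value pairs; a deployer might set, for example (flag names illustrative):
juju set nova-cloud-controller config-flags="quota_instances=20,cpu_allocation_ratio=4.0"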
configure_volume_service() {
local svc="$1"
local cur_vers="$(get_os_codename_package "nova-common")"
case "$svc" in
"cinder")
set_or_update "volume_api_class" "nova.volume.cinder.API" ;;
"nova-volume")
# nova-volume only supported before grizzly.
[[ "$cur_vers" == "essex" ]] || [[ "$cur_vers" == "folsom" ]] &&
set_or_update "volume_api_class" "nova.volume.api.API"
;;
*) juju-log "$CHARM ERROR - configure_volume_service: Invalid service $svc"
return 1 ;;
esac
}
function configure_network_manager {
local manager="$1"
echo "$CHARM: configuring $manager network manager"
case $1 in
"FlatManager")
set_or_update "network_manager" "nova.network.manager.FlatManager"
;;
"FlatDHCPManager")
set_or_update "network_manager" "nova.network.manager.FlatDHCPManager"
if [[ "$CHARM" == "nova-compute" ]] ; then
local flat_interface=$(config-get flat-interface)
local ec2_host=$(relation-get ec2_host)
set_or_update flat_interface "$flat_interface"
set_or_update ec2_dmz_host "$ec2_host"
# Ensure flat_interface has link.
if ip link show $flat_interface >/dev/null 2>&1 ; then
ip link set $flat_interface up
fi
# work around (LP: #1035172)
if [[ -e /dev/vhost-net ]] ; then
iptables -A POSTROUTING -t mangle -p udp --dport 68 -j CHECKSUM \
--checksum-fill
fi
fi
;;
"Quantum")
local local_ip=$(get_ip `unit-get private-address`)
[[ -n $local_ip ]] || {
juju-log "Unable to resolve local IP address"
exit 1
}
set_or_update "network_api_class" "nova.network.quantumv2.api.API"
set_or_update "quantum_auth_strategy" "keystone"
set_or_update "core_plugin" "$QUANTUM_CORE_PLUGIN" "$QUANTUM_CONF"
set_or_update "bind_host" "0.0.0.0" "$QUANTUM_CONF"
local cur="$(get_os_codename_package "nova-common")"
local vers=$(get_os_version_codename $cur)
if dpkg --compare-versions $vers ge '2013.1'; then
# Configure per-tenant managed quotas - >= grizzly only
set_or_update "quota_driver" "quantum.db.quota_db.DbQuotaDriver" \
"$QUANTUM_CONF" "QUOTAS"
fi
if [ "$QUANTUM_PLUGIN" == "ovs" ]; then
set_or_update "tenant_network_type" "gre" $QUANTUM_PLUGIN_CONF "OVS"
set_or_update "enable_tunneling" "True" $QUANTUM_PLUGIN_CONF "OVS"
set_or_update "tunnel_id_ranges" "1:1000" $QUANTUM_PLUGIN_CONF "OVS"
set_or_update "local_ip" "$local_ip" $QUANTUM_PLUGIN_CONF "OVS"
fi
if [ "$(config-get quantum-security-groups)" == "yes" ] && \
dpkg --compare-versions $vers ge '2013.1'; then
set_or_update "security_group_api" "quantum"
set_or_update "firewall_driver" "nova.virt.firewall.NoopFirewallDriver"
set_or_update "allow_overlapping_ips" "True" $QUANTUM_CONF
if [ "$QUANTUM_PLUGIN" == "ovs" ]; then
set_or_update "firewall_driver" \
"quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver" \
$QUANTUM_PLUGIN_CONF "SECURITYGROUP"
fi
# Ensure that security_group_* is included in quota'ed resources
set_or_update "quota_items" "network,subnet,port,security_group,security_group_rule" \
$QUANTUM_CONF "QUOTAS"
fi
;;
*) juju-log "ERROR: Invalid network manager $1" && exit 1 ;;
esac
}
function trigger_remote_service_restarts() {
# Trigger a service restart on all other nova nodes that have a relation
# via the cloud-controller interface.
# possible relations to other nova services.
local relations="cloud-compute nova-volume-service"
for rel in $relations; do
local r_ids=$(relation-ids $rel)
for r_id in $r_ids ; do
juju-log "$CHARM: Triggering a service restart on relation $r_id."
relation-set -r $r_id restart-trigger=$(uuid)
done
done
}
do_openstack_upgrade() {
# update openstack components to those provided by a new installation source
# it is assumed the calling hook has confirmed that the upgrade is sane.
local rel="$1"
shift
local packages=$@
orig_os_rel=$(get_os_codename_package "nova-common")
new_rel=$(get_os_codename_install_source "$rel")
# Backup the config directory.
local stamp=$(date +"%Y%m%d%H%M%S")
tar -pcf /var/lib/juju/$CHARM-backup-$stamp.tar $CONF_DIR
# load the release helper library for pre/post upgrade hooks specific to the
# release we are upgrading to.
. $HOOKS_DIR/lib/nova/$new_rel
# new release specific pre-upgrade hook
nova_pre_upgrade "$orig_os_rel"
# Setup apt repository access and kick off the actual package upgrade.
configure_install_source "$rel"
apt-get update
DEBIAN_FRONTEND=noninteractive apt-get --option Dpkg::Options::=--force-confold -y \
install --no-install-recommends $packages
# new release specific post-upgrade hook
nova_post_upgrade "$orig_os_rel"
}
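
A typical invocation, mirroring the nova-cloud-controller config-changed hook (the Cloud Archive pocket shown is only an example):
determine_packages
do_openstack_upgrade "cloud:precise-folsom" $PACKAGES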

@ -1,781 +0,0 @@
#!/bin/bash -e
# Common utility functions used across all OpenStack charms.
error_out() {
juju-log "$CHARM ERROR: $@"
exit 1
}
function service_ctl_status {
# Return 0 if a service is running, 1 otherwise.
local svc="$1"
local status=$(service $svc status | cut -d/ -f1 | awk '{ print $2 }')
case $status in
"start") return 0 ;;
"stop") return 1 ;;
*) error_out "Unexpected status of service $svc: $status" ;;
esac
}
function service_ctl {
# control a specific service, or all (as defined by $SERVICES)
# service restarts will only occur depending on global $CONFIG_CHANGED,
# which should be updated in charm's set_or_update().
local config_changed=${CONFIG_CHANGED:-True}
if [[ $1 == "all" ]] ; then
ctl="$SERVICES"
else
ctl="$1"
fi
action="$2"
if [[ -z "$ctl" ]] || [[ -z "$action" ]] ; then
error_out "ERROR service_ctl: Not enough arguments"
fi
for i in $ctl ; do
case $action in
"start")
service_ctl_status $i || service $i start ;;
"stop")
service_ctl_status $i && service $i stop || return 0 ;;
"restart")
if [[ "$config_changed" == "True" ]] ; then
service_ctl_status $i && service $i restart || service $i start
fi
;;
esac
if [[ $? != 0 ]] ; then
juju-log "$CHARM: service_ctl ERROR - Service $i failed to $action"
fi
done
# all configs should have been reloaded on restart of all services; reset
# the flag if it's being used.
if [[ "$action" == "restart" ]] && [[ -n "$CONFIG_CHANGED" ]] &&
[[ "$ctl" == "all" ]]; then
CONFIG_CHANGED="False"
fi
}
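
Usage sketch: restarts are gated on $CONFIG_CHANGED, while start/stop are not (service names must appear in the charm's $SERVICES):
CONFIG_CHANGED="True"
service_ctl all restart          # restart every service in $SERVICES
service_ctl nova-scheduler stop  # stop a single service unconditionally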
function configure_install_source {
# Setup and configure installation source based on a config flag.
local src="$1"
# Default to installing from the main Ubuntu archive.
[[ $src == "distro" ]] || [[ -z "$src" ]] && return 0
. /etc/lsb-release
# standard 'ppa:someppa/name' format.
if [[ "${src:0:4}" == "ppa:" ]] ; then
juju-log "$CHARM: Configuring installation from custom src ($src)"
add-apt-repository -y "$src" || error_out "Could not configure PPA access."
return 0
fi
# standard 'deb http://url/ubuntu main' entries. gpg key ids must
# be appended to the end of url after a |, ie:
# 'deb http://url/ubuntu main|$GPGKEYID'
if [[ "${src:0:3}" == "deb" ]] ; then
juju-log "$CHARM: Configuring installation from custom src URL ($src)"
if echo "$src" | grep -q "|" ; then
# gpg key id appended to the end of the url, following a |
url=$(echo $src | cut -d'|' -f1)
key=$(echo $src | cut -d'|' -f2)
juju-log "$CHARM: Importing repository key: $key"
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys "$key" || \
juju-log "$CHARM WARN: Could not import key from keyserver: $key"
else
juju-log "$CHARM No repository key specified."
url="$src"
fi
echo "$url" > /etc/apt/sources.list.d/juju_deb.list
return 0
fi
# Cloud Archive
if [[ "${src:0:6}" == "cloud:" ]] ; then
# current os releases supported by the UCA.
local cloud_archive_versions="folsom grizzly"
local ca_rel=$(echo $src | cut -d: -f2)
local u_rel=$(echo $ca_rel | cut -d- -f1)
local os_rel=$(echo $ca_rel | cut -d- -f2 | cut -d/ -f1)
[[ "$u_rel" != "$DISTRIB_CODENAME" ]] &&
error_out "Cannot install from Cloud Archive pocket $src " \
"on this Ubuntu version ($DISTRIB_CODENAME)!"
valid_release=""
for rel in $cloud_archive_versions ; do
if [[ "$os_rel" == "$rel" ]] ; then
valid_release=1
juju-log "Installing OpenStack ($os_rel) from the Ubuntu Cloud Archive."
fi
done
if [[ -z "$valid_release" ]] ; then
error_out "OpenStack release ($os_rel) not supported by "\
"the Ubuntu Cloud Archive."
fi
# CA staging repos are standard PPAs.
if echo $ca_rel | grep -q "staging" ; then
add-apt-repository -y ppa:ubuntu-cloud-archive/${os_rel}-staging
return 0
fi
# the others are LP-external deb repos.
case "$ca_rel" in
"$u_rel-$os_rel"|"$u_rel-$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;;
"$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;;
"$u_rel-$os_rel"|"$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;;
"$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;;
*) error_out "Invalid Cloud Archive repo specified: $src"
esac
apt-get -y install ubuntu-cloud-keyring
entry="deb http://ubuntu-cloud.archive.canonical.com/ubuntu $pocket main"
echo "$entry" \
>/etc/apt/sources.list.d/ubuntu-cloud-archive-$DISTRIB_CODENAME.list
return 0
fi
error_out "Invalid installation source specified in config: $src"
}
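
Taken together, the branches above accept four source forms (the PPA name, URL and key are the same placeholders used in the comments):
configure_install_source "distro"
configure_install_source "ppa:someppa/name"
configure_install_source "deb http://url/ubuntu main|GPGKEYID"
configure_install_source "cloud:precise-folsom/updates"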
get_os_codename_install_source() {
# derive the openstack release provided by a supported installation source.
local rel="$1"
local codename="unknown"
. /etc/lsb-release
# map ubuntu releases to the openstack version shipped with it.
if [[ "$rel" == "distro" ]] ; then
case "$DISTRIB_CODENAME" in
"oneiric") codename="diablo" ;;
"precise") codename="essex" ;;
"quantal") codename="folsom" ;;
"raring") codename="grizzly" ;;
esac
fi
# derive version from cloud archive strings.
if [[ "${rel:0:6}" == "cloud:" ]] ; then
rel=$(echo $rel | cut -d: -f2)
local u_rel=$(echo $rel | cut -d- -f1)
local ca_rel=$(echo $rel | cut -d- -f2)
if [[ "$u_rel" == "$DISTRIB_CODENAME" ]] ; then
case "$ca_rel" in
"folsom"|"folsom/updates"|"folsom/proposed"|"folsom/staging")
codename="folsom" ;;
"grizzly"|"grizzly/updates"|"grizzly/proposed"|"grizzly/staging")
codename="grizzly" ;;
esac
fi
fi
# have a guess based on the deb string provided
if [[ "${rel:0:3}" == "deb" ]] || \
[[ "${rel:0:3}" == "ppa" ]] ; then
CODENAMES="diablo essex folsom grizzly havana"
for cname in $CODENAMES; do
if echo $rel | grep -q $cname; then
codename=$cname
fi
done
fi
echo $codename
}
get_os_codename_package() {
local pkg_vers=$(dpkg -l | grep "$1" | awk '{ print $3 }') || echo "none"
pkg_vers=$(echo $pkg_vers | cut -d: -f2) # epochs
case "${pkg_vers:0:6}" in
"2011.2") echo "diablo" ;;
"2012.1") echo "essex" ;;
"2012.2") echo "folsom" ;;
"2013.1") echo "grizzly" ;;
"2013.2") echo "havana" ;;
esac
}
get_os_version_codename() {
case "$1" in
"diablo") echo "2011.2" ;;
"essex") echo "2012.1" ;;
"folsom") echo "2012.2" ;;
"grizzly") echo "2013.1" ;;
"havana") echo "2013.2" ;;
esac
}
get_ip() {
dpkg -l | grep -q python-dnspython || {
apt-get -y install python-dnspython >/dev/null 2>&1
}
hostname=$1
python -c "
import dns.resolver
import socket
try:
    # Test to see if already an IPv4 address
    socket.inet_aton('$hostname')
    print '$hostname'
except socket.error:
    try:
        answers = dns.resolver.query('$hostname', 'A')
        if answers:
            print answers[0].address
    except dns.resolver.NXDOMAIN:
        pass
"
}
# Common storage routines used by cinder, nova-volume and swift-storage.
clean_storage() {
# if configured to overwrite existing storage, we unmount the block-dev
# if mounted and clear any previous pv signatures
local block_dev="$1"
juju-log "Cleaining storage '$block_dev'"
if grep -q "^$block_dev" /proc/mounts ; then
mp=$(grep "^$block_dev" /proc/mounts | awk '{ print $2 }')
juju-log "Unmounting $block_dev from $mp"
umount "$mp" || error_out "ERROR: Could not unmount storage from $mp"
fi
if pvdisplay "$block_dev" >/dev/null 2>&1 ; then
juju-log "Removing existing LVM PV signatures from $block_dev"
# deactivate any volgroups that may be built on this dev
vg=$(pvdisplay $block_dev | grep "VG Name" | awk '{ print $3 }')
if [[ -n "$vg" ]] ; then
juju-log "Deactivating existing volume group: $vg"
vgchange -an "$vg" ||
error_out "ERROR: Could not deactivate volgroup $vg. Is it in use?"
fi
echo "yes" | pvremove -ff "$block_dev" ||
error_out "Could not pvremove $block_dev"
else
juju-log "Zapping disk of all GPT and MBR structures"
sgdisk --zap-all $block_dev ||
error_out "Unable to zap $block_dev"
fi
}
function get_block_device() {
# given a string, return the full path to its block device.
# if the input is not a block device, find or create a loopback device.
local input="$1"
case "$input" in
/dev/*) [[ ! -b "$input" ]] && error_out "$input does not exist."
echo "$input"; return 0;;
/*) :;;
*) [[ ! -b "/dev/$input" ]] && error_out "/dev/$input does not exist."
echo "/dev/$input"; return 0;;
esac
# this represents a file
# support "/path/to/file|5G"
local fpath size oifs="$IFS"
if [ "${input#*|}" != "${input}" ]; then
size=${input##*|}
fpath=${input%|*}
else
fpath=${input}
size=5G
fi
## loop devices are not namespaced. This is bad for containers.
## it means that the output of 'losetup' may have the given $fpath
## in it, but that may not represent this container's $fpath, but
## another container's. To address that, we really need to
## allow some uniq container-id to be expanded within path.
## TODO: find a unique container-id that will be consistent for
## this container throughout its lifetime and expand it
## in the fpath.
# fpath=${fpath//%{id}/$THAT_ID}
local found=""
# parse through 'losetup -a' output, looking for this file
# output is expected to look like:
# /dev/loop0: [0807]:961814 (/tmp/my.img)
found=$(losetup -a |
awk 'BEGIN { found=0; }
$3 == f { sub(/:$/,"",$1); print $1; found=found+1; }
END { if( found == 0 || found == 1 ) { exit(0); }; exit(1); }' \
f="($fpath)")
if [ $? -ne 0 ]; then
echo "multiple devices found for $fpath: $found" 1>&2
return 1;
fi
[ -n "$found" -a -b "$found" ] && { echo "$found"; return 1; }
if [ -n "$found" ]; then
echo "confused, $found is not a block device for $fpath";
return 1;
fi
# no existing device was found, create one
mkdir -p "${fpath%/*}"
truncate --size "$size" "$fpath" ||
{ echo "failed to create $fpath of size $size"; return 1; }
found=$(losetup --find --show "$fpath") ||
{ echo "failed to setup loop device for $fpath" 1>&2; return 1; }
echo "$found"
return 0
}
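
Hypothetical calls (device, path and size are placeholders):
get_block_device "sdb"               # -> /dev/sdb, which must already exist
get_block_device "/dev/sdb"          # -> /dev/sdb, which must already exist
get_block_device "/path/to/file|5G"  # -> a loop device backed by a 5G sparse file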
HAPROXY_CFG=/etc/haproxy/haproxy.cfg
HAPROXY_DEFAULT=/etc/default/haproxy
##########################################################################
# Description: Configures HAProxy services for OpenStack APIs
# Parameters:
# Space delimited list of service:haproxy_port:api_port:mode combinations
# for which haproxy service configuration should be generated. The function
# assumes the name of the peer relation is 'cluster' and that every
# service unit in the peer relation is running the same services.
#
# Services that do not specify :mode in parameter will default to http.
#
# Example
# configure_haproxy cinder_api:8776:8756:tcp nova_api:8774:8764:http
##########################################################################
configure_haproxy() {
local address=`unit-get private-address`
local name=${JUJU_UNIT_NAME////-}
cat > $HAPROXY_CFG << EOF
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
maxconn 20000
user haproxy
group haproxy
spread-checks 0
defaults
log global
mode http
option httplog
option dontlognull
retries 3
timeout queue 1000
timeout connect 1000
timeout client 30000
timeout server 30000
listen stats :8888
mode http
stats enable
stats hide-version
stats realm Haproxy\ Statistics
stats uri /
stats auth admin:password
EOF
for service in $@; do
local service_name=$(echo $service | cut -d : -f 1)
local haproxy_listen_port=$(echo $service | cut -d : -f 2)
local api_listen_port=$(echo $service | cut -d : -f 3)
local mode=$(echo $service | cut -d : -f 4)
[[ -z "$mode" ]] && mode="http"
juju-log "Adding haproxy configuration entry for $service "\
"($haproxy_listen_port -> $api_listen_port)"
cat >> $HAPROXY_CFG << EOF
listen $service_name 0.0.0.0:$haproxy_listen_port
balance roundrobin
mode $mode
option ${mode}log
server $name $address:$api_listen_port check
EOF
local r_id=""
local unit=""
for r_id in `relation-ids cluster`; do
for unit in `relation-list -r $r_id`; do
local unit_name=${unit////-}
local unit_address=`relation-get -r $r_id private-address $unit`
if [ -n "$unit_address" ]; then
echo " server $unit_name $unit_address:$api_listen_port check" \
>> $HAPROXY_CFG
fi
done
done
done
echo "ENABLED=1" > $HAPROXY_DEFAULT
service haproxy restart
}
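
For the nova_api:8774:8764:http example above, the generated stanza would look roughly like this (unit names and addresses are placeholders):
listen nova_api 0.0.0.0:8774
balance roundrobin
mode http
option httplog
server nova-cloud-controller-0 10.0.0.1:8764 check
server nova-cloud-controller-1 10.0.0.2:8764 check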
##########################################################################
# Description: Query HA interface to determine if cluster is configured
# Returns: 0 if configured, 1 if not configured
##########################################################################
is_clustered() {
local r_id=""
local unit=""
for r_id in $(relation-ids ha); do
if [ -n "$r_id" ]; then
for unit in $(relation-list -r $r_id); do
clustered=$(relation-get -r $r_id clustered $unit)
if [ -n "$clustered" ]; then
juju-log "Unit is haclustered"
return 0
fi
done
fi
done
juju-log "Unit is not haclustered"
return 1
}
##########################################################################
# Description: Return a list of all peers in cluster relations
##########################################################################
peer_units() {
local peers=""
local r_id=""
for r_id in $(relation-ids cluster); do
peers="$peers $(relation-list -r $r_id)"
done
echo $peers
}
##########################################################################
# Description: Determines whether the current unit is the oldest of all
# its peers - supports partial leader election
# Returns: 0 if oldest, 1 if not
##########################################################################
oldest_peer() {
peers=$1
local l_unit_no=$(echo $JUJU_UNIT_NAME | cut -d / -f 2)
for peer in $peers; do
echo "Comparing $JUJU_UNIT_NAME with peers: $peers"
local r_unit_no=$(echo $peer | cut -d / -f 2)
if (($r_unit_no<$l_unit_no)); then
juju-log "Not oldest peer; deferring"
return 1
fi
done
juju-log "Oldest peer; might take charge?"
return 0
}
##########################################################################
# Description: Determines whether the current service unit is the
# leader within a) a cluster of its peers or b) across a
# set of unclustered peers.
# Parameters: CRM resource to check ownership of if clustered
# Returns: 0 if leader, 1 if not
##########################################################################
eligible_leader() {
if is_clustered; then
if ! is_leader $1; then
juju-log 'Deferring action to CRM leader'
return 1
fi
else
peers=$(peer_units)
if [ -n "$peers" ] && ! oldest_peer "$peers"; then
juju-log 'Deferring action to oldest service unit.'
return 1
fi
fi
return 0
}
##########################################################################
# Description: Query Cluster peer interface to see if peered
# Returns: 0 if peered, 1 if not peered
##########################################################################
is_peered() {
local r_id=$(relation-ids cluster)
if [ -n "$r_id" ]; then
if [ -n "$(relation-list -r $r_id)" ]; then
juju-log "Unit peered"
return 0
fi
fi
juju-log "Unit not peered"
return 1
}
##########################################################################
# Description: Determines whether host is owner of clustered services
# Parameters: Name of CRM resource to check ownership of
# Returns: 0 if leader, 1 if not leader
##########################################################################
is_leader() {
hostname=`hostname`
if [ -x /usr/sbin/crm ]; then
if crm resource show $1 | grep -q $hostname; then
juju-log "$hostname is cluster leader."
return 0
fi
fi
juju-log "$hostname is not cluster leader."
return 1
}
##########################################################################
# Description: Determines whether enough data has been provided in
# configuration or relation data to configure HTTPS.
# Parameters: None
# Returns: 0 if HTTPS can be configured, 1 if not.
##########################################################################
https() {
local r_id=""
if [[ -n "$(config-get ssl_cert)" ]] &&
[[ -n "$(config-get ssl_key)" ]] ; then
return 0
fi
for r_id in $(relation-ids identity-service) ; do
for unit in $(relation-list -r $r_id) ; do
if [[ "$(relation-get -r $r_id https_keystone $unit)" == "True" ]] &&
[[ -n "$(relation-get -r $r_id ssl_cert $unit)" ]] &&
[[ -n "$(relation-get -r $r_id ssl_key $unit)" ]] &&
[[ -n "$(relation-get -r $r_id ca_cert $unit)" ]] ; then
return 0
fi
done
done
return 1
}
##########################################################################
# Description: For a given number of port mappings, configures apache2
# HTTPS local reverse proxying using certificates and keys provided in
# either configuration data (preferred) or relation data. Assumes ports
# are not in use (calling charm should ensure that).
# Parameters: Variable number of proxy port mappings as
# $internal:$external.
# Returns: 0 if reverse proxy(s) have been configured, 1 if not.
##########################################################################
enable_https() {
local port_maps="$@"
local http_restart=""
juju-log "Enabling HTTPS for port mappings: $port_maps."
# allow overriding of keystone provided certs with those set manually
# in config.
local cert=$(config-get ssl_cert)
local key=$(config-get ssl_key)
local ca_cert=""
if [[ -z "$cert" ]] || [[ -z "$key" ]] ; then
juju-log "Inspecting identity-service relations for SSL certificate."
local r_id=""
cert=""
key=""
ca_cert=""
for r_id in $(relation-ids identity-service) ; do
for unit in $(relation-list -r $r_id) ; do
[[ -z "$cert" ]] && cert="$(relation-get -r $r_id ssl_cert $unit)"
[[ -z "$key" ]] && key="$(relation-get -r $r_id ssl_key $unit)"
[[ -z "$ca_cert" ]] && ca_cert="$(relation-get -r $r_id ca_cert $unit)"
done
done
[[ -n "$cert" ]] && cert=$(echo $cert | base64 -di)
[[ -n "$key" ]] && key=$(echo $key | base64 -di)
[[ -n "$ca_cert" ]] && ca_cert=$(echo $ca_cert | base64 -di)
else
juju-log "Using SSL certificate provided in service config."
fi
[[ -z "$cert" ]] || [[ -z "$key" ]] &&
juju-log "Expected but could not find SSL certificate data, not "\
"configuring HTTPS!" && return 1
apt-get -y install apache2
a2enmod ssl proxy proxy_http | grep -v "To activate the new configuration" &&
http_restart=1
mkdir -p /etc/apache2/ssl/$CHARM
echo "$cert" >/etc/apache2/ssl/$CHARM/cert
echo "$key" >/etc/apache2/ssl/$CHARM/key
if [[ -n "$ca_cert" ]] ; then
juju-log "Installing Keystone supplied CA cert."
echo "$ca_cert" >/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
update-ca-certificates --fresh
# XXX TODO: Find a better way of exporting this?
if [[ "$CHARM" == "nova-cloud-controller" ]] ; then
[[ -e /var/www/keystone_juju_ca_cert.crt ]] &&
rm -rf /var/www/keystone_juju_ca_cert.crt
ln -s /usr/local/share/ca-certificates/keystone_juju_ca_cert.crt \
/var/www/keystone_juju_ca_cert.crt
fi
fi
for port_map in $port_maps ; do
local ext_port=$(echo $port_map | cut -d: -f1)
local int_port=$(echo $port_map | cut -d: -f2)
juju-log "Creating apache2 reverse proxy vhost for $port_map."
cat >/etc/apache2/sites-available/${CHARM}_${ext_port} <<END
Listen $ext_port
NameVirtualHost *:$ext_port
<VirtualHost *:$ext_port>
ServerName $(unit-get private-address)
SSLEngine on
SSLCertificateFile /etc/apache2/ssl/$CHARM/cert
SSLCertificateKeyFile /etc/apache2/ssl/$CHARM/key
ProxyPass / http://localhost:$int_port/
ProxyPassReverse / http://localhost:$int_port/
ProxyPreserveHost on
</VirtualHost>
<Proxy *>
Order deny,allow
Allow from all
</Proxy>
<Location />
Order allow,deny
Allow from all
</Location>
END
a2ensite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
http_restart=1
done
if [[ -n "$http_restart" ]] ; then
service apache2 restart
fi
}
##########################################################################
# Description: Ensure HTTPS reverse proxying is disabled for given port
# mappings.
# Parameters: Variable number of proxy port mappings as
# $internal:$external.
# Returns: 0 if reverse proxy is not active for all portmaps, 1 on error.
##########################################################################
disable_https() {
local port_maps="$@"
local http_restart=""
juju-log "Ensuring HTTPS disabled for $port_maps."
( [[ ! -d /etc/apache2 ]] || [[ ! -d /etc/apache2/ssl/$CHARM ]] ) && return 0
for port_map in $port_maps ; do
local ext_port=$(echo $port_map | cut -d: -f1)
local int_port=$(echo $port_map | cut -d: -f2)
if [[ -e /etc/apache2/sites-available/${CHARM}_${ext_port} ]] ; then
juju-log "Disabling HTTPS reverse proxy for $CHARM $port_map."
a2dissite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
http_restart=1
fi
done
if [[ -n "$http_restart" ]] ; then
service apache2 restart
fi
}
##########################################################################
# Description: Ensures HTTPS is either enabled or disabled for given port
# mapping.
# Parameters: Variable number of proxy port mappings as
# $internal:$external.
# Returns: 0 if HTTPS reverse proxy is in place, 1 if it is not.
##########################################################################
setup_https() {
# configure https via apache reverse proxying either
# using certs provided by config or keystone.
[[ -z "$CHARM" ]] &&
error_out "setup_https(): CHARM not set."
if ! https ; then
disable_https $@
else
enable_https $@
fi
}
##########################################################################
# Description: Determine correct API server listening port based on
# existence of HTTPS reverse proxy and/or haproxy.
# Parameters: The standard public port for given service.
# Returns: The correct listening port for API service.
##########################################################################
determine_api_port() {
local public_port="$1"
local i=0
( [[ -n "$(peer_units)" ]] || is_clustered >/dev/null 2>&1 ) && i=$[$i + 1]
https >/dev/null 2>&1 && i=$[$i + 1]
echo $[$public_port - $[$i * 10]]
}
##########################################################################
# Description: Determine correct proxy listening port based on public IP +
# existence of HTTPS reverse proxy.
# Parameters: The standard public port for given service.
# Returns: The correct listening port for haproxy service public address.
##########################################################################
determine_haproxy_port() {
local public_port="$1"
local i=0
https >/dev/null 2>&1 && i=$[$i + 1]
echo $[$public_port - $[$i * 10]]
}
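
Worked example for the standard compute API port, assuming a clustered deployment with HTTPS enabled:
# determine_haproxy_port 8774 -> 8764 (one step back for the apache frontend)
# determine_api_port 8774     -> 8754 (two steps back: apache + haproxy)
# apache terminates SSL on 8774, haproxy listens on 8764, and the API
# service itself binds to 8754.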
##########################################################################
# Description: Print the value for a given config option in an OpenStack
# .ini style configuration file.
# Parameters: File path, option to retrieve, optional
# section name (default=DEFAULT)
# Returns: Prints value if set, prints nothing otherwise.
##########################################################################
local_config_get() {
# return config values set in openstack .ini config files.
# values still set to a default placeholder (eg, %AUTH_HOST%) are
# treated as unset.
local file="$1"
local option="$2"
local section="$3"
[[ -z "$section" ]] && section="DEFAULT"
python -c "
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('$file')
try:
    value = config.get('$section', '$option')
except:
    print ''
    exit(0)
if value.startswith('%'): exit(0)
print value
"
}
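
For instance, to read back an authtoken setting that keystone_changed writes into quantum's api-paste.ini (variable name illustrative):
admin_user=$(local_config_get /etc/quantum/api-paste.ini admin_user filter:authtoken)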
##########################################################################
# Description: Creates an rc file exporting environment variables to a
# script_path local to the charm's installed directory.
# Any charm scripts run outside the juju hook environment can source this
# scriptrc to obtain updated config information necessary to perform health
# checks or service changes
#
# Parameters:
# An array of '=' delimited ENV_VAR=value combinations to export.
# If optional script_path key is not provided in the array, script_path
# defaults to scripts/scriptrc
##########################################################################
function save_script_rc {
if [ ! -n "$JUJU_UNIT_NAME" ]; then
echo "Error: Missing JUJU_UNIT_NAME environment variable"
exit 1
fi
# our default unit_path
unit_path="$CHARM_DIR/scripts/scriptrc"
echo $unit_path
tmp_rc="/tmp/${JUJU_UNIT_NAME/\//-}rc"
echo "#!/bin/bash" > $tmp_rc
for env_var in "${@}"
do
if echo $env_var | grep -q script_path; then
# well then we need to reset the new unit-local script path
unit_path="$CHARM_DIR/${env_var/script_path=/}"
else
echo "export $env_var" >> $tmp_rc
fi
done
chmod 755 $tmp_rc
mv $tmp_rc $unit_path
}

@ -1,293 +0,0 @@
#!/bin/bash
CHARM="nova-cloud-controller"
CONF_DIR="/etc/nova"
NOVA_CONF="/etc/nova/nova.conf"
API_CONF="/etc/nova/api-paste.ini"
QUANTUM_CONF="/etc/quantum/quantum.conf"
QUANTUM_API_CONF="/etc/quantum/api-paste.ini"
HOOKS_DIR="$CHARM_DIR/hooks"
NET_MANAGER=$(config-get network-manager)
if [[ -e $HOOKS_DIR/lib/nova/nova-common ]] ; then
. $HOOKS_DIR/lib/nova/nova-common
else
juju-log "Couldn't load $HOOKS_DIR/lib/nova/nova-common" && exit 1
fi
function determine_services {
# Sets the global $SERVICES which contains a list of all services
# managed by the charm. This changes based on OpenStack release.
# Currently, the service list also determines what ends up in $PACKAGES.
# base c-c services supported across all os releases since essex.
SERVICES="nova-api-ec2 nova-api-os-compute nova-objectstore nova-cert nova-scheduler"
# determine additional services, dependent on what version of OS.
local install_src="$(config-get openstack-origin)"
install_src=$(get_os_codename_install_source "$install_src")
local os_vers=$(get_os_codename_package "nova-common")
if [[ "$os_vers" == "none" ]] ; then
[[ "$install_src" == "unknown" ]] && echo "$SERVICES" && return 0
fi
os_vers="$install_src"
if [[ "$os_vers" != "essex" ]] && [[ "$os_vers" != "folsom" ]] ; then
# nova-conductor was introduced in grizzly.
SERVICES="$SERVICES nova-conductor"
else
local n_vol=$(relation-ids nova-volume-service)
if [[ -n "$n_vol" ]] ; then
# nova-volume was dropped in G but may still be deployed for E + F,
# but should only be managed when a relation to nova-volume exists.
SERVICES="$SERVICES nova-api-os-volume"
# need to also ensure the package gets installed here. if the relation
# is introduced during another hook, a call to 'service_ctl all' will
# require it to be there.
dpkg -l | grep -q nova-api-os-volume ||
apt-get -y install nova-api-os-volume
fi
fi
# quantum is really only supported for folsom and beyond.
if [[ "$NET_MANAGER" == "Quantum" ]] ; then
[[ "$os_vers" == "essex" ]] &&
error_out "Quantum network manager only supported for Folsom + beyond."
SERVICES="$SERVICES quantum-server"
fi
}
function determine_packages {
# Derive a list of packages based on what our service needs are. This changes
# depending on several factors.
determine_services
PACKAGES="$SERVICES python-mysqldb python-keystone uuid charm-helper-sh haproxy"
if echo $PACKAGES | grep -q "quantum-server" ; then
case "$(config-get quantum-plugin)" in
"ovs") PACKAGES="$PACKAGES quantum-plugin-openvswitch" ;;
"nvp") PACKAGES="$PACKAGES quantum-plugin-nicira" ;;
esac
fi
juju-log "$CHARM: Determined required packages: $PACKAGES."
}
function determine_quantum_config {
# Set QUANTUM_PLUGIN and point QUANTUM_CORE_PLUGIN and QUANTUM_PLUGIN_CONF
# to the correct files based on configuration.
QUANTUM_PLUGIN=${QUANTUM_PLUGIN:-$(config-get quantum-plugin)}
case "$QUANTUM_PLUGIN" in
"ovs")
QUANTUM_CORE_PLUGIN="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2"
QUANTUM_PLUGIN_CONF="/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini"
;;
"nvp")
QUANTUM_CORE_PLUGIN="quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin.NvpPluginV2"
QUANTUM_PLUGIN_CONF="/etc/quantum/plugins/nicira/nvp.ini"
;;
*)
juju-log "Unrecognised plugin for quantum: $QUANTUM_PLUGIN" && exit 1
;;
esac
}
function configure_quantum_networking {
determine_quantum_config
if [ "$(config-get conf-ext-net)" != "no" ] &&
[ -f /etc/quantum/novarc ] &&
[ -n "$(relation-ids amqp)" ] &&
[ -n "$(relation-ids shared-db)" ]; then
juju-log "Configuring external networking for quantum"
if eligible_leader "res_nova_vip"; then
# Use helper to create external network gateway
# and router using generated credentials
. /etc/quantum/novarc
quantum-ext-net -g $(config-get ext-net-gateway) \
-c $(config-get ext-net-cidr) \
-f $(config-get pool-floating-start):$(config-get pool-floating-end) \
$(config-get ext-net-name)
fi
set_or_update "default_floating_pool" "$(config-get ext-net-name)"
fi
}
function ssh_authorized_keys {
local key="$1"
local action="$2"
local exists=""
local sunit=$(echo $JUJU_REMOTE_UNIT | cut -d/ -f1)
mkdir -p /etc/nova/compute_ssh/$sunit
local authorized_keys="/etc/nova/compute_ssh/$sunit/authorized_keys"
[[ -e "$authorized_keys" ]] &&
grep -q "^$key" $authorized_keys && exists="true"
if [[ "$action" == "add" ]] ; then
[[ -n "$exists" ]] &&
juju-log "$CHARM: SSH key already authorized for $JUJU_REMOTE_UNIT." &&
return 0
echo "$key" >>$authorized_keys
juju-log "$CHARM: Authorized new SSH key for $JUJU_REMOTE_UNIT."
return 0
elif [[ "$action" == "remove" ]] ; then
# we have no way of getting to the relation state during a departed hook.
# we only have the peer's unit name, so remove an authorized key based on
# its comment, which can be derived from the remote unit name and
# is passed in here from the caller as key ($1).
local key_ln=$(sed -n "\, ${key}$,=" $authorized_keys)
[[ -z "$key_ln" ]] &&
juju-log "$CHARM: Cannot remove SSH key for $key, not authorized?" &&
return 0
for ln in $key_ln ; do
sed -i "${ln}d" $authorized_keys
juju-log "$CHARM: Removed existing SSH key ($key) from authorized_keys."
done
return 0
else
error_out "$CHARM: ssh_authorize_keys() invalid action specified: $action."
fi
}
function ssh_known_hosts {
# Keeps the system-wide SSH known hosts file up to date with compute
# nodes host keys.
local host="$1"
local sunit=$(echo $JUJU_REMOTE_UNIT | cut -d/ -f1)
mkdir -p /etc/nova/compute_ssh/$sunit
local known_hosts="/etc/nova/compute_ssh/$sunit/known_hosts"
juju-log "$CHARM: Ensuring host is included and up to date in $known_hosts."
[[ ! -e $known_hosts ]] && touch $known_hosts
local remote_key=""
remote_key=$(ssh-keyscan -H -t rsa $host) ||
error_out "$CHARM: Couldn't obtain SSH host key from $host."
local existing=$(ssh-keygen -f $known_hosts -H -F $host | tail -n1)
if [[ -n "$existing" ]] ; then
juju-log "$CHARM: Found existing SSH known host key for $host."
[[ "$existing" == "$remote_key" ]] && echo "HI"
remote=$(echo $remote_key | awk '{ print $2" "$3 }')
existing=$(echo $existing | awk '{ print $2" "$3 }')
if [[ "$remote" == "$existing" ]] ; then
juju-log "$CHARM: SSH known host key for $host is up to date."
return 0
fi
juju-log "$CHARM: Removing outdated SSH host key for $host."
ssh-keygen -f $known_hosts -R $host
else
juju-log "$CHARM: No known hosts entry for $host."
fi
juju-log "$CHARM: Adding new SSH known hosts entry for $host."
echo $remote_key >>$known_hosts
}
function ssh_compute {
if [[ "$1" == "add" ]] ; then
local ssh_key=$(relation-get ssh_public_key)
[[ -z "$ssh_key" ]] &&
juju-log "$CHARM: ssh_compute peer not ready." && exit 0
ssh_authorized_keys "$ssh_key" "add"
# need to ensure known hosts entries for all possible addresses
. /usr/share/charm-helper/sh/net.sh
local known_hosts=""
local private_address=$(relation-get private-address)
known_hosts="$private_address"
if ! ch_is_ip "$private_address" ; then
known_hosts="$known_hosts $(get_ip $private_address)"
known_hosts="$known_hosts $(echo $private_address | cut -d. -f1)"
fi
for host in $known_hosts ; do
ssh_known_hosts "$host"
done
elif [[ "$1" == "remove" ]] ; then
# remove key by referencing remote unit, not entire key.
local remote_unit=$(echo $JUJU_REMOTE_UNIT | sed -e 's,/,-,g')
ssh_authorized_keys "$remote_unit" remove
else
error_out "ssh_compute: Invalid parameter: $1."
fi
local sunit=$(echo $JUJU_REMOTE_UNIT | cut -d/ -f1)
# base64 encodings should trigger new relation events as needed.
relation-set \
known_hosts="$(base64 /etc/nova/compute_ssh/$sunit/known_hosts)" \
authorized_keys="$(base64 /etc/nova/compute_ssh/$sunit/authorized_keys)"
}
configure_https() {
# setup https termination for all api services, depending on what is running
# and topology of current deployment.
local clustered=""
( [[ -n "$(peer_units)" ]] || is_clustered ) && clustered="1"
local services=""
local ssl_port_maps=""
local haproxy_port_maps=""
local next_server=""
local api_port=""
# upstartService:defaultPort:configOption
local svcs="nova-api-ec2:8773:ec2_listen_port
nova-api-os-compute:8774:osapi_compute_listen_port
nova-objectstore:3333:s3_listen_port"
[[ "$NET_MANAGER" == "Quantum" ]] &&
svcs="$svcs quantum-server:9696:bind_port"
for s in $svcs ; do
local service=$(echo $s | cut -d: -f1)
local port=$(echo $s | cut -d: -f2)
local opt=$(echo $s | cut -d: -f3)
if [[ -n "$clustered" ]] ; then
next_server="$(determine_haproxy_port $port)"
api_port="$(determine_api_port $port)"
haproxy_port_maps="$haproxy_port_maps $service:$next_server:$api_port"
else
api_port="$(determine_api_port $port)"
next_server="$api_port"
fi
if [[ "$service" == "quantum-server" ]] ; then
set_or_update "$opt" "$api_port" "$QUANTUM_CONF"
else
set_or_update "$opt" "$api_port"
fi
ssl_port_maps="$ssl_port_maps $port:$next_server"
done
# make sure all backend api servers are bound to new backend port
# before setting up any frontends.
for s in $svcs ; do
local service=$(echo $s | cut -d: -f1)
service_ctl $service restart
done
[[ -n "$haproxy_port_maps" ]] && configure_haproxy $haproxy_port_maps
setup_https $ssl_port_maps
# another restart to ensure api servers are now bound to frontend ports
# that may have just been disabled.
for s in $svcs ; do
local service=$(echo $s | cut -d: -f1)
service_ctl $service restart
done
local r_id=""
# (re)configure ks endpoint accordingly
for r_id in $(relation-ids identity-service) ; do
keystone_joined "$r_id"
done
# pass on possibly updated quantum URL + ca_cert to compute nodes.
for r_id in $(relation-ids cloud-compute) ; do
compute_joined "$r_id"
done
# update the quantum relation, as well.
for r_id in $(relation-ids quantum-network-service) ; do
quantum_joined "$r_id"
done
}

@ -1,698 +0,0 @@
#!/bin/bash -e
HOOKS_DIR="$CHARM_DIR/hooks"
arg0=$(basename $0)
if [[ -e $HOOKS_DIR/nova-cloud-controller-common ]] ; then
. $HOOKS_DIR/nova-cloud-controller-common
else
juju-log "ERROR: Could not load nova-cloud-controller-common from $HOOKS_DIR"
fi
function install_hook {
juju-log "$CHARM: Installing nova packages"
apt-get -y install python-software-properties || exit 1
configure_install_source "$(config-get openstack-origin)"
apt-get update || exit 1
determine_packages
DEBIAN_FRONTEND=noninteractive apt-get -y \
install --no-install-recommends $PACKAGES || exit 1
if [[ "$NET_MANAGER" == "Quantum" ]] ; then
determine_quantum_config
fi
configure_network_manager $NET_MANAGER
# Configure any flags specified in deployment config
set_config_flags
# Open up the various API endpoints
# EC2
open-port 8773
# osapi-compute
open-port 8774
# object-store / s3
open-port 3333
# Quantum API if configured
if [ "$NET_MANAGER" == "Quantum" ]; then
open-port 9696
fi
# Helpers for creating external and tenant networks
cp files/create_ext_net.py /usr/bin/quantum-ext-net
cp files/create_tenant_net.py /usr/bin/quantum-tenant-net
service_ctl all stop
configure_https
}
function upgrade_charm {
install_hook
service_ctl all start
}
function config_changed {
# Determine whether or not we should do an upgrade, based on whether or not
# the version offered in openstack-origin is greater than what is installed.
local install_src=$(config-get openstack-origin)
local cur=$(get_os_codename_package "nova-common")
local available=$(get_os_codename_install_source "$install_src")
if dpkg --compare-versions $(get_os_version_codename "$cur") lt \
$(get_os_version_codename "$available") ; then
juju-log "$CHARM: Upgrading OpenStack release: $cur -> $available."
determine_packages
do_openstack_upgrade "$install_src" $PACKAGES
fi
set_config_flags
if [ "$NET_MANAGER" == "Quantum" ]; then
configure_quantum_networking
fi
determine_services
service_ctl all restart
# Save our scriptrc env variables for health checks
declare -a env_vars=(
"OPENSTACK_PORT_MCASTPORT=$(config-get ha-mcastport)"
'OPENSTACK_SERVICE_API_EC2=nova-api-ec2'
'OPENSTACK_SERVICE_API_OS_COMPUTE=nova-api-os-compute'
'OPENSTACK_SERVICE_CERT=nova-cert'
'OPENSTACK_SERVICE_CONDUCTOR=nova-conductor'
'OPENSTACK_SERVICE_OBJECTSTORE=nova-objectstore'
'OPENSTACK_SERVICE_SCHEDULER=nova-scheduler')
save_script_rc ${env_vars[@]}
configure_https
}
function amqp_joined {
# we request a username on the rabbit queue and store it in nova.conf.
# the response (broker host + password) is handled in amqp_changed.
local rabbit_user=$(config-get rabbit-user)
local rabbit_vhost=$(config-get rabbit-vhost)
juju-log "$CHARM - amqp_joined: requesting credentials for $rabbit_user"
relation-set username=$rabbit_user
relation-set vhost=$rabbit_vhost
}
function amqp_changed {
# server creates our credentials and tells us where
# to connect. for now, using default vhost '/'
local rabbit_host=$(relation-get private-address)
local rabbit_password=$(relation-get password)
if [[ -z $rabbit_host ]] || \
[[ -z $rabbit_password ]] ; then
juju-log "$CHARM - amqp_changed: rabbit_host||rabbit_password not set."
exit 0
fi
# if the rabbitmq service is clustered among nodes with hacluster,
# point to its vip instead of its private-address.
local clustered=$(relation-get clustered)
if [[ -n "$clustered" ]] ; then
juju-log "$CHARM - ampq_changed: Configuring for "\
"access to haclustered rabbitmq service."
local vip=$(relation-get vip)
[[ -z "$vip" ]] && juju-log "$CHARM - amqp_changed: Clustered but no vip."\
&& exit 0
rabbit_host="$vip"
fi
local rabbit_user=$(config-get rabbit-user)
local rabbit_vhost=$(config-get rabbit-vhost)
juju-log "$CHARM - amqp_changed: Setting rabbit config in nova.conf: " \
"$rabbit_user@$rabbit_host/$rabbit_vhost"
set_or_update rabbit_host $rabbit_host
set_or_update rabbit_userid $rabbit_user
set_or_update rabbit_password $rabbit_password
set_or_update rabbit_virtual_host $rabbit_vhost
if [ "$(config-get network-manager)" == "Quantum" ]; then
set_or_update rabbit_host "$rabbit_host" "$QUANTUM_CONF"
set_or_update rabbit_userid "$rabbit_user" "$QUANTUM_CONF"
set_or_update rabbit_password "$rabbit_password" "$QUANTUM_CONF"
set_or_update rabbit_virtual_host "$rabbit_vhost" "$QUANTUM_CONF"
fi
determine_services && service_ctl all restart
if [ "$NET_MANAGER" == "Quantum" ]; then
configure_quantum_networking
fi
}
function db_joined {
# tell mysql provider which database we want. it will create it and give us
# credentials
local nova_db=$(config-get nova-db)
local db_user=$(config-get db-user)
local hostname=$(unit-get private-address)
juju-log "$CHARM - db_joined: requesting database access to $nova_db for "\
"$db_user@$hostname"
relation-set nova_database=$nova_db nova_username=$db_user nova_hostname=$hostname
if [ "$NET_MANAGER" == "Quantum" ]; then
relation-set quantum_database=quantum quantum_username=quantum quantum_hostname=$hostname
fi
}
function db_changed {
local db_host=`relation-get db_host`
local db_password=`relation-get nova_password`
if [[ -z $db_host ]] || [[ -z $db_password ]] ; then
juju-log "$CHARM - db_changed: db_host||db_password set, will retry."
exit 0
fi
local nova_db=$(config-get nova-db)
local db_user=$(config-get db-user)
juju-log "$CHARM - db_changed: Configuring nova.conf for access to $nova_db"
set_or_update sql_connection "mysql://$db_user:$db_password@$db_host/$nova_db"
if [ "$NET_MANAGER" == "Quantum" ]; then
local quantum_db_password=`relation-get quantum_password`
determine_quantum_config
set_or_update sql_connection "mysql://quantum:$quantum_db_password@$db_host/quantum?charset=utf8" \
$QUANTUM_PLUGIN_CONF "DATABASE"
fi
eligible_leader 'res_nova_vip' && /usr/bin/nova-manage db sync
determine_services
service_ctl all restart
if [ "$NET_MANAGER" == "Quantum" ]; then
configure_quantum_networking
fi
trigger_remote_service_restarts
}
function image-service_changed {
local api_server=$(relation-get glance-api-server)
[[ -z $api_server ]] &&
juju-log "$CHARM - image-service_changed: Peer not ready?" && exit 0
if [[ "$(get_os_codename_package nova-common)" == "essex" ]] ; then
# essex needs glance_api_servers urls stripped of protocol.
api_server="$(echo $api_server | awk '{gsub(/http:\/\/|https:\/\//,"")}1')"
fi
set_or_update glance_api_servers $api_server
set_or_update image_service "nova.image.glance.GlanceImageService"
determine_services && service_ctl all restart
}
function keystone_joined {
# we need to get two entries into keystone's catalog, nova + ec2.
# group them by prepending $service_ to each setting; the keystone
# charm will assemble settings into corresponding catalog entries.
eligible_leader 'res_nova_vip' || return 0
is_clustered && local host=$(config-get vip) ||
local host=$(unit-get private-address)
if [[ "$arg0" == "identity-service-relation-joined" ]] ; then
# determine https status based only on config at this point;
# inspecting the KS relation is not reliable. if KS has multiple
# units, multiple relation-joineds are fired, resulting in the
# endpoint being configured in the catalog as https before https
# is actually set up on this end. that ends with a failure to
# configure the quantum network, if it's enabled.
# if specified in config, https will have already been setup in
# install or config-changed.
if [[ -n "$(config-get ssl_cert)" ]] &&
[[ -n "$(config-get ssl_key)" ]] ; then
local scheme="https"
else
local scheme="http"
fi
else
# this function is called from other hook contexts, use normal method
# for determining https
https && scheme="https" || scheme="http"
fi
local nova_url="$scheme://$host:8774/v1.1/\$(tenant_id)s"
local ec2_url="$scheme://$host:8773/services/Cloud"
local s3_url="$scheme://$host:3333"
local region="$(config-get region)"
local quantum_url="$scheme://$host:9696"
# these are the default endpoints
relation-set nova_service="nova" \
nova_region="$region" \
nova_public_url="$nova_url" \
nova_admin_url="$nova_url" \
nova_internal_url="$nova_url" \
ec2_service="ec2" \
ec2_region="$region" \
ec2_public_url="$ec2_url" \
ec2_admin_url="$ec2_url" \
ec2_internal_url="$ec2_url" \
s3_service="s3" \
s3_region="$region" \
s3_public_url="$s3_url" \
s3_admin_url="$s3_url" \
s3_internal_url="$s3_url"
if [ "$(config-get network-manager)" == "Quantum" ]; then
relation-set quantum_service="quantum" \
quantum_region="$region" \
quantum_public_url="$quantum_url" \
quantum_admin_url="$quantum_url" \
quantum_internal_url="$quantum_url"
fi
# tack on an endpoint for nova-volume if a relation exists.
if [[ -n "$(relation-ids nova-volume-service)" ]] ; then
nova_vol_url="$scheme://$host:8776/v1/\$(tenant_id)s"
relation-set nova-volume_service="nova-volume" \
nova-volume_region="$region" \
nova-volume_public_url="$nova_vol_url" \
nova-volume_admin_url="$nova_vol_url" \
nova-volume_internal_url="$nova_vol_url"
fi
}
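# keystone reassembles these flat settings by prefix, so the nova_*
# keys above become one catalog entry, roughly:
#   service=nova region=$region url=$scheme://$host:8774/v1.1/$(tenant_id)s
# (same pattern for ec2, s3 and, when enabled, quantum)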
function keystone_changed {
token=$(relation-get admin_token)
service_port=$(relation-get service_port)
auth_port=$(relation-get auth_port)
service_username=$(relation-get service_username)
service_password=$(relation-get service_password)
service_tenant=$(relation-get service_tenant)
region=$(config-get region)
[[ -z "$token" ]] || [[ -z "$service_port" ]] || [[ -z "$auth_port" ]] ||
[[ -z "$service_username" ]] || [[ -z "$service_password" ]] ||
[[ -z "$service_tenant" ]] &&
juju-log "$CHARM - keystone_changed: Peer not ready" && exit 0
[[ "$token" == "-1" ]] &&
juju-log "$CHARM - keystone_changed: admin token error" && exit 1
# No need to update paste deploy pipelines, just set a flag in nova.conf
set_or_update "auth_strategy" "keystone"
# Update keystone authentication configuration
service_host=$(relation-get service_host)
auth_host=$(relation-get auth_host)
set_or_update "keystone_ec2_url" "http://$service_host:$service_port/v2.0/ec2tokens"
if grep -q use_deprecated_auth $NOVA_CONF ; then
juju-log "$CHARM - keystone_changed: Disabling '--use_deprecated_auth"
sed -i '/--use_deprecated_auth/d' $NOVA_CONF
fi
local clustered=""
is_clustered && clustered="1"
[[ -n "$clustered" ]] && local host=$(config-get vip) ||
local host=$(unit-get private-address)
https && local scheme="https" || local scheme="http"
# update keystone authtoken settings accordingly
set_or_update "service_host" "$service_host" "$API_CONF"
set_or_update "service_port" "$service_port" "$API_CONF"
set_or_update "auth_host" "$auth_host" "$API_CONF"
set_or_update "auth_port" "$auth_port" "$API_CONF"
# XXX http hard-coded
set_or_update "auth_uri" "http://$service_host:$service_port/" "$API_CONF"
set_or_update "admin_token" "$token" "$API_CONF"
set_or_update "admin_tenant_name" "$service_tenant" "$API_CONF"
set_or_update "admin_user" "$service_username" "$API_CONF"
set_or_update "admin_password" "$service_password" "$API_CONF"
if [ "$NET_MANAGER" == "Quantum" ]; then
# Configure Nova for quantum
keystone_url="http://${auth_host}:${auth_port}/v2.0"
set_or_update "quantum_url" "$scheme://$host:9696"
set_or_update "quantum_admin_tenant_name" "${service_tenant}"
set_or_update "quantum_admin_username" "${service_username}"
set_or_update "quantum_admin_password" "${service_password}"
set_or_update "quantum_admin_auth_url" "${keystone_url}"
# Configure API server for quantum
set_or_update "admin_tenant_name" "$service_tenant" "$QUANTUM_API_CONF" "filter:authtoken"
set_or_update "admin_user" "$service_username" "$QUANTUM_API_CONF" "filter:authtoken"
set_or_update "admin_password" "$service_password" "$QUANTUM_API_CONF" "filter:authtoken"
set_or_update "auth_host" "$auth_host" "$QUANTUM_API_CONF" "filter:authtoken"
set_or_update "auth_port" "$auth_port" "$QUANTUM_API_CONF" "filter:authtoken"
# Save a local copy of the credentials for later use
cat > /etc/quantum/novarc << EOF
export OS_USERNAME=${service_username}
export OS_PASSWORD=${service_password}
export OS_TENANT_NAME=${service_tenant}
export OS_AUTH_URL=${keystone_url}
export OS_REGION_NAME=$region
EOF
fi
determine_services && service_ctl all restart
if [ "$NET_MANAGER" == "Quantum" ]; then
# if this is the first time here, configure quantum before
# setting up https.
configure_quantum_networking
# ripple out identity changes to connected services that use the
# cloud controller as their source of keystone information
local r_ids="$(relation-ids cloud-compute) $(relation-ids quantum-network-service)"
for id in $r_ids ; do
relation-set -r $id \
keystone_host=$auth_host \
auth_port=$auth_port \
service_port=$service_port \
service_username=$service_username \
service_password=$service_password \
service_tenant=$service_tenant \
region=$region \
auth_uri="http://$service_host:$service_port/" # XXX http hard-coded
done
fi
configure_https
# if this changed event happens as a result of clustered VIP
# reconfigure, configure_https needs to update VIP certificate
# before quantumclient is used.
if [[ "$NET_MANAGER" == "Quantum" ]]; then
configure_quantum_networking
fi
}
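# the novarc written above can later be sourced to drive the quantum
# client by hand, e.g. (a minimal sketch):
#   . /etc/quantum/novarc && quantum net-list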
volume_joined() {
local svc=""
case "$arg0" in
"cinder-volume-service-relation-joined") svc="cinder" ;;
"nova-volume-service-relation-joined") svc="nova-volume" ;;
*) svc="nova-volume" ;;
esac
local cur_vers=$(get_os_codename_package "nova-common")
if [[ "$cur_vers" != "essex" ]] && [[ "$cur_vers" != "folsom" ]] &&
[[ "$svc" == "nova-volume" ]] ; then
juju-log "$CHARM: WARNING nova-volume is only supported on Essex "\
"and Folsom. Ignoring new relation to nova-volume service."
exit 0
fi
configure_volume_service "$svc"
determine_services && service_ctl all restart
# The nova-volume API can be hosted here alongside the other
# nova API services, but there needs to be a new endpoint
# configured in keystone.
if [[ "$svc" == "nova-volume" ]] ; then
apt-get -y install nova-api-os-volume
local nova_vol_url="http://$(unit-get private-address):8776/v1/\$(tenant_id)s"
local r_ids=$(relation-ids identity-service)
for id in $r_ids ; do
juju-log "$CHARM: Registering new endpoint for nova-volume API on "\
"existing identity-service relation: $id"
relation-set -r $id nova-volume_service="nova-volume" \
nova-volume_region="$(config-get region)" \
nova-volume_public_url="$nova_vol_url" \
nova-volume_admin_url="$nova_vol_url" \
nova-volume_internal_url="$nova_vol_url"
done
fi
if [[ "$svc" == "cinder" ]] ; then
# Compute nodes need to be notified to set their volume
# driver accordingly.
r_ids=$(relation-ids cloud-compute)
for id in $r_ids ; do
relation-set -r $id volume_service="cinder"
done
fi
}
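# net effect of a cinder join (sketch): every existing cloud-compute
# relation is told volume_service="cinder", overriding whatever default
# compute_joined advertised earlier.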
compute_joined() {
local r_id="$1"
[[ -n "$r_id" ]] && r_id="-r $r_id"
eligible_leader 'res_nova_vip' || return 0
relation-set $r_id network_manager=$(config-get network-manager)
# XXX Should point to VIP if clustered, or this may not even be needed.
relation-set $r_id ec2_host=$(unit-get private-address)
local sect="filter:authtoken"
keystone_host=$(local_config_get $API_CONF auth_host $sect)
if [ "$NET_MANAGER" == "Quantum" ]; then
if [[ -n "$keystone_host" ]]; then
relation-set $r_id \
keystone_host=$keystone_host \
auth_port=$(local_config_get $API_CONF auth_port $sect) \
service_port=$(local_config_get $API_CONF service_port $sect) \
service_username=$(local_config_get $API_CONF admin_user $sect) \
service_password=$(local_config_get $API_CONF admin_password $sect) \
service_tenant=$(local_config_get $API_CONF admin_tenant_name $sect) \
auth_uri=$(local_config_get $API_CONF auth_uri $sect)
fi
is_clustered && local host=$(config-get vip) ||
local host=$(unit-get private-address)
https && local scheme="https" || local scheme="http"
local quantum_url="$scheme://$host:9696"
relation-set $r_id quantum_url=$quantum_url \
quantum_plugin=$(config-get quantum-plugin) \
region=$(config-get region) \
quantum_security_groups=$(config-get quantum-security-groups)
fi
# must pass on the keystone CA certificate, if it exists.
cert="/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt"
if [[ -n "$keystone_host" ]] && [[ -e $cert ]] ; then
cert=$(cat $cert | base64)
relation-set $r_id ca_cert="$cert"
fi
# volume driver is dependent on os version, or presence
# of cinder (on folsom, at least)
local cur_vers=$(get_os_codename_package "nova-common")
local vol_drv="cinder"
case "$cur_vers" in
"essex")
vol_drv="nova-volume"
;;
"folsom")
[[ -z "$(relation-ids cinder-volume-service)" ]] && vol_drv="nova-volume"
;;
esac
relation-set $r_id volume_service="$vol_drv"
}
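# a compute peer would typically rebuild the CA file from ca_cert, e.g.
# (a sketch; the target path mirrors the one used above):
#   relation-get ca_cert | base64 -d > /usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
#   update-ca-certificates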
compute_changed() {
local migration_auth="$(relation-get migration_auth_type)"
[[ -z "$migration_auth" ]] &&
juju-log "$CHARM: compute_changed - Peer not ready or "\
"no migration auth. configured." && exit 0
case "$migration_auth" in
"ssh") ssh_compute add ;;
esac
}
compute_departed() {
ssh_compute remove
}
function quantum_joined() {
# Tell quantum service about keystone
eligible_leader 'res_nova_vip' || return 0
local r_id="$1"
[[ -n "$r_id" ]] && r_id="-r $r_id"
local sect="filter:authtoken"
keystone_host=$(local_config_get $API_CONF auth_host $sect)
if [ -n "$keystone_host" ]; then
relation-set $r_id \
keystone_host=$keystone_host \
auth_port=$(local_config_get $API_CONF auth_port $sect) \
service_port=$(local_config_get $API_CONF service_port $sect) \
service_username=$(local_config_get $API_CONF admin_user $sect) \
service_password=$(local_config_get $API_CONF admin_password $sect) \
service_tenant=$(local_config_get $API_CONF admin_tenant_name $sect) \
auth_uri=$(local_config_get $API_CONF auth_uri $sect)
fi
# must pass on the keystone CA certificate, if it exists.
cert="/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt"
if [[ -n "$keystone_host" ]] && [[ -e $cert ]] ; then
cert=$(cat $cert | base64)
relation-set $r_id ca_cert="$cert"
fi
is_clustered && local host=$(config-get vip) ||
local host=$(unit-get private-address)
https && local scheme="https" || local scheme="http"
local quantum_url="$scheme://$host:9696"
relation-set $r_id quantum_host="$host" quantum_port="9696" \
quantum_url=$quantum_url \
quantum_plugin=$(config-get quantum-plugin) \
region=$(config-get region)
}
function cluster_changed() {
[[ -z "$(peer_units)" ]] &&
juju-log "cluster_changed() with no peers." && exit 0
# upstartService:defaultPort:configOption
local svcs="nova-api-ec2:8773:ec2_listen_port
nova-api-os-compute:8774:osapi_compute_listen_port
nova-objectstore:3333:s3_listen_port"
[[ "$NET_MANAGER" == "Quantum" ]] &&
svcs="$svcs quantum-server:9696:bind_port"
for s in $svcs ; do
local service=$(echo $s | cut -d: -f1)
local port=$(echo $s | cut -d: -f2)
local opt=$(echo $s | cut -d: -f3)
local next_server="$(determine_haproxy_port $port)"
local api_port="$(determine_api_port $port)"
local haproxy_port_maps="$haproxy_port_maps $service:$next_server:$api_port:http"
if [[ "$service" == "quantum-server" ]] ; then
set_or_update "$opt" "$api_port" "$QUANTUM_CONF"
else
set_or_update "$opt" "$api_port"
fi
service_ctl $service restart
done
configure_haproxy $haproxy_port_maps
}
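# illustrative shape of each haproxy_port_maps entry assembled above
# (concrete ports depend on the determine_haproxy_port and
# determine_api_port helpers defined elsewhere):
#   nova-api-os-compute:<haproxy_port>:<api_port>:http
# i.e. the daemon rebinds to api_port and haproxy fronts the public port.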
function ha_relation_joined() {
local corosync_bindiface=`config-get ha-bindiface`
local corosync_mcastport=`config-get ha-mcastport`
local vip=`config-get vip`
local vip_iface=`config-get vip_iface`
local vip_cidr=`config-get vip_cidr`
if [ -n "$vip" ] && [ -n "$vip_iface" ] && \
[ -n "$vip_cidr" ] && [ -n "$corosync_bindiface" ] && \
[ -n "$corosync_mcastport" ]; then
# TODO: This feels horrible, but the data required by the hacluster
# charm is quite complex and is parsed with python's ast module.
resources="{
'res_nova_vip':'ocf:heartbeat:IPaddr2',
'res_nova_haproxy':'lsb:haproxy'
}"
resource_params="{
'res_nova_vip': 'params ip=\"$vip\" cidr_netmask=\"$vip_cidr\" nic=\"$vip_iface\"',
'res_nova_haproxy': 'op monitor interval=\"5s\"'
}"
init_services="{
'res_nova_haproxy':'haproxy'
}"
clones="{
'cl_nova_haproxy':'res_nova_haproxy'
}"
relation-set corosync_bindiface=$corosync_bindiface \
corosync_mcastport=$corosync_mcastport \
resources="$resources" resource_params="$resource_params" \
init_services="$init_services" clones="$clones"
else
juju-log "Insufficient configuration data to configure hacluster"
exit 1
fi
}
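# hacluster parses the strings above as python literals, hence the
# careful quoting; resource_params, e.g., arrives as a dict like
# (vip/iface values hypothetical):
#   {'res_nova_vip': 'params ip="10.0.0.100" cidr_netmask="24" nic="eth0"', ...}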
function ha_relation_changed() {
local clustered=`relation-get clustered`
if [ -n "$clustered" ] && is_leader 'res_nova_vip'; then
https && local scheme="https" || local scheme="http"
for r_id in `relation-ids identity-service`; do
local address=$(config-get vip)
local region=$(config-get region)
local nova_url="$scheme://$address:8774/v1.1/\$(tenant_id)s"
local ec2_url="$scheme://$address:8773/services/Cloud"
local s3_url="$scheme://$address:3333"
local quantum_url="$scheme://$address:9696"
local nova_vol_url="$scheme://$address:8776/v1/\$(tenant_id)s"
relation-set -r $r_id \
nova_service="nova" \
nova_region="$region" \
nova_public_url="$nova_url" \
nova_admin_url="$nova_url" \
nova_internal_url="$nova_url" \
ec2_service="ec2" \
ec2_region="$region" \
ec2_public_url="$ec2_url" \
ec2_admin_url="$ec2_url" \
ec2_internal_url="$ec2_url" \
s3_service="s3" \
s3_region="$region" \
s3_public_url="$s3_url" \
s3_admin_url="$s3_url" \
s3_internal_url="$s3_url"
if [ "$(config-get network-manager)" == "Quantum" ]; then
relation-set -r $r_id \
quantum_service="quantum" \
quantum_region="$region" \
quantum_public_url="$quantum_url" \
quantum_admin_url="$quantum_url" \
quantum_internal_url="$quantum_url"
fi
if [[ -n "$(relation-ids nova-volume-service)" ]] ; then
relation-set -r $r_id \
nova-volume_service="nova-volume" \
nova-volume_region="$region" \
nova-volume_public_url="$nova_vol_url" \
nova-volume_admin_url="$nova_vol_url" \
nova-volume_internal_url="$nova_vol_url"
fi
done
if [ "$(config-get network-manager)" == "Quantum" ]; then
# Let gateway nodes use the new HA address for the
# quantum API server
for r_id in `relation-ids quantum-network-service`; do
relation-set -r $r_id \
quantum_host="$address" quantum_port="9696" \
quantum_url="$quantum_url" region="$region"
done
fi
fi
}
arg0=$(basename $0)
case $arg0 in
"start"|"stop") determine_services ; service_ctl all $arg0 ;;
"install") install_hook ;;
"config-changed") config_changed ;;
"upgrade-charm") upgrade_charm ;;
"amqp-relation-joined") amqp_joined ;;
"amqp-relation-changed") amqp_changed ;;
"shared-db-relation-joined") db_joined ;;
"shared-db-relation-changed") db_changed ;;
"image-service-relation-joined") exit 0 ;;
"image-service-relation-changed") image-service_changed ;;
"identity-service-relation-joined") keystone_joined ;;
"identity-service-relation-changed") keystone_changed ;;
"cinder-volume-service-relation-joined") volume_joined ;;
"nova-volume-service-relation-joined") volume_joined ;;
"cloud-compute-relation-joined") compute_joined ;;
"cloud-compute-relation-changed") compute_changed ;;
"cloud-compute-relation-departed") compute_departed ;;
"quantum-network-service-relation-joined") quantum_joined ;;
"cluster-relation-changed") cluster_changed ;;
"cluster-relation-departed") cluster_changed ;;
"ha-relation-joined") ha_relation_joined ;;
"ha-relation-changed") ha_relation_changed ;;
*) exit 0 ;;
esac
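# every hook is shipped as a symlink to this script; juju invokes the
# symlink and the basename dispatch above selects the handler, e.g.:
#   ln -s nova-cloud-controller-relations identity-service-relation-changed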

View File

@ -1 +0,0 @@
nova-cloud-controller-relations

View File

@ -1,11 +0,0 @@
nova-cloud-controller:
nova-release: trunk
nova-config: /etc/nova/nova.conf
db-user: nova
nova-db: nova
rabbit-user: nova
rabbit-vhost: nova
network-manager: FlatManager
bridge-interface: br100
bridge-ip: 11.0.0.1
bridge-netmask: 255.255.255.0

View File

@ -1,6 +0,0 @@
#!/bin/bash
. novarc
image="ttylinux-uec-amd64-12.1_2.6.35-22_1.tar.gz"
[[ ! -e $image ]] && wget http://smoser.brickies.net/ubuntu/ttylinux-uec/$image
uec-publish-tarball $image images
euca-describe-images

View File

@ -1,12 +0,0 @@
#!/bin/bash
apt-get -y install unzip
nova-manage user admin admin
echo "User creation: $?"
nova-manage project create novaproject admin
echo "Project creation: $?"
nova-manage network create novanet 11.0.0.0/24 1 255
echo "Network creation: $?"
nova-manage project zipfile novaproject admin
echo "Zipfile creation: $?"
unzip nova.zip
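# nova.zip (created by the zipfile step above) contains a novarc for
# the admin user; a quick smoke test might be (a sketch):
#   . novarc && euca-describe-availability-zones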