# lib/nova
# Functions to control the configuration and operation of the **Nova** service

# Dependencies:
# ``functions`` file
# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
# ``LIBVIRT_TYPE`` must be defined
# ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined
# ``KEYSTONE_TOKEN_FORMAT`` must be defined

# ``stack.sh`` calls the entry points in this order:
#
# install_nova
# configure_nova
# create_nova_conf
# init_nova
# start_nova
# stop_nova
# cleanup_nova

# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace

# Defaults
# --------

# Set up default directories
NOVA_DIR=$DEST/nova
NOVACLIENT_DIR=$DEST/python-novaclient
NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova}
# INSTANCES_PATH is the previous name for this
NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}}
NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova}
NOVA_CONF_DIR=/etc/nova
NOVA_CONF=$NOVA_CONF_DIR/nova.conf
NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf
NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell}
NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}

# Public facing bits
NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST}
NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774}
NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774}
NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}

# Support entry points installation of console scripts
if [[ -d $NOVA_DIR/bin ]]; then
    NOVA_BIN_DIR=$NOVA_DIR/bin
else
    NOVA_BIN_DIR=$(get_python_exec_prefix)
fi

# Set the paths of certain binaries
NOVA_ROOTWRAP=$(get_rootwrap_location nova)

# Allow rate limiting to be turned off for testing, like for Tempest
# NOTE: Set API_RATE_LIMIT="False" to turn OFF rate limiting
API_RATE_LIMIT=${API_RATE_LIMIT:-"True"}
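# For example, a ``localrc`` for a Tempest run might simply set:
#   API_RATE_LIMIT=False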

# Nova supports pluggable schedulers. The default ``FilterScheduler``
# should work in most cases.
SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}

QEMU_CONF=/etc/libvirt/qemu.conf

NOVNC_DIR=$DEST/noVNC
SPICE_DIR=$DEST/spice-html5

# Nova Network Configuration
# --------------------------

# Set defaults according to the virt driver
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
    PUBLIC_INTERFACE_DEFAULT=eth2
    GUEST_INTERFACE_DEFAULT=eth1
    # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
    FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline)
    if is_service_enabled neutron; then
        XEN_INTEGRATION_BRIDGE=$(sed -e 's/.* xen_integration_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline)
    fi
elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
    NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager}
    PUBLIC_INTERFACE_DEFAULT=eth0
    FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
    FLAT_NETWORK_BRIDGE_DEFAULT=br100
    STUB_NETWORK=${STUB_NETWORK:-False}
else
    PUBLIC_INTERFACE_DEFAULT=br100
    GUEST_INTERFACE_DEFAULT=eth0
    FLAT_NETWORK_BRIDGE_DEFAULT=br100
fi
NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}}
PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}

# If you are using the FlatDHCP network mode on multiple hosts, set the
# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already
# have an IP or you risk breaking things.
#
# **DHCP Warning**: If your flat interface device uses DHCP, there will be a
# hiccup while the network is moved from the flat interface to the flat network
# bridge. This will happen when you launch your first instance. Upon launch
# you will lose all connectivity to the node, and the VM launch will probably
# fail.
#
# If you are running on a single node and don't need to access the VMs from
# devices other than that node, you can set ``FLAT_INTERFACE=``.
# This stops nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
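# Illustrative ``localrc`` settings for the two cases above:
#   FLAT_INTERFACE=eth0    # multi-host: a dedicated interface with no IP
#   FLAT_INTERFACE=        # single node: do not bridge any interface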

# ``MULTI_HOST`` is a mode where each compute node runs its own network node. This
# allows network operations and routing for a VM to occur on the server that is
# running the VM, removing a single point of failure and a bandwidth bottleneck.
MULTI_HOST=`trueorfalse False $MULTI_HOST`

# The test floating pool and range are used for testing; they are defined
# here until the admin APIs can replace ``nova-manage``.
TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
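# Elsewhere (e.g. in ``stack.sh``) these are typically consumed along the
# lines of:
#   nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL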

# Functions
# ---------

# Helper to clean iptables rules
function clean_iptables() {
    # Delete rules
    sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables",$0}' | bash
    # Delete nat rules
    sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables -t nat",$0}' | bash
    # Delete chains
    sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables",$0}' | bash
    # Delete nat chains
    sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables -t nat",$0}' | bash
}
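
# For example, a rule captured as ``-A nova-network-INPUT -j ACCEPT`` is
# replayed as ``sudo iptables -D nova-network-INPUT -j ACCEPT``, and a chain
# declaration ``-N nova-network-INPUT`` becomes
# ``sudo iptables -X nova-network-INPUT``; the leading ``sed`` strips the
# packet/byte counters that ``-v`` adds. (Chain names here are illustrative.)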

# cleanup_nova() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_nova() {
    if is_service_enabled n-cpu; then
        # Clean iptables from previous runs
        clean_iptables

        # Destroy old instances
        instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
        if [ ! "$instances" = "" ]; then
            echo $instances | xargs -n1 sudo virsh destroy || true
            echo $instances | xargs -n1 sudo virsh undefine --managed-save || true
        fi

        # Log out of and delete iSCSI sessions
        tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2)
        for target in $tgts; do
            sudo iscsiadm --mode node -T $target --logout || true
        done
        sudo iscsiadm --mode node --op delete || true

        # Clean out the instances directory
        sudo rm -rf $NOVA_INSTANCES_PATH/*
    fi

    sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR
}
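
# Note: ``iscsiadm --mode node`` output lines look roughly like
#   192.168.1.10:3260,1 iqn.2010-10.org.openstack:volume-00000001
# so the ``cut -d ' ' -f2`` above extracts the target IQN to log out of.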

# configure_nova_rootwrap() - configure Nova's rootwrap
function configure_nova_rootwrap() {
    # Deploy new rootwrap filters files (owned by root).
    # Wipe any existing rootwrap.d files first
    if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then
        sudo rm -rf $NOVA_CONF_DIR/rootwrap.d
    fi

    # Deploy filters to /etc/nova/rootwrap.d
    sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d
    sudo cp $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d
    sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d
    sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/*

    # Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d
    sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/
    sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf
    sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf
    sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf

    # Specify rootwrap.conf as first parameter to nova-rootwrap
    ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf *"

    # Set up the rootwrap sudoers for nova
    TEMPFILE=`mktemp`
    echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
    chmod 0440 $TEMPFILE
    sudo chown root:root $TEMPFILE
    sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap
}
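
# With default paths, the sudoers entry installed above ends up looking like
# the following (user name and rootwrap path will vary):
#   stack ALL=(root) NOPASSWD: /usr/local/bin/nova-rootwrap /etc/nova/rootwrap.conf *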

# configure_nova() - Set config files, create data dirs, etc
function configure_nova() {
    # Put config files in ``/etc/nova`` for everyone to find
    if [[ ! -d $NOVA_CONF_DIR ]]; then
        sudo mkdir -p $NOVA_CONF_DIR
    fi
    sudo chown $STACK_USER $NOVA_CONF_DIR

    cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR

    configure_nova_rootwrap

    if is_service_enabled n-api; then
        # Use the sample http middleware configuration supplied in the
        # Nova sources. This paste config adds the configuration required
        # for Nova to validate Keystone tokens.

        # Remove legacy paste config if present
        rm -f $NOVA_DIR/bin/nova-api-paste.ini

        # Get the sample configuration file in place
        cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR

        iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST
        if is_service_enabled tls-proxy; then
            iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
        fi
        iniset $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
        iniset $NOVA_API_PASTE_INI filter:authtoken admin_user nova
        iniset $NOVA_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
    fi

    iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR

    if is_service_enabled n-cpu; then
        # Force IP forwarding on, just in case
        sudo sysctl -w net.ipv4.ip_forward=1

        if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
            # Attempt to load modules: network block device - used to manage qcow images
            sudo modprobe nbd || true

            # Check for kvm (hardware based virtualization). If unable to initialize
            # kvm, we drop back to the slower emulation mode (qemu). Note: many systems
            # come with hardware virtualization disabled in BIOS.
            if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then
                sudo modprobe kvm || true
                if [ ! -e /dev/kvm ]; then
                    echo "WARNING: Switching to QEMU"
                    LIBVIRT_TYPE=qemu
                    if which selinuxenabled >/dev/null 2>&1 && selinuxenabled; then
                        # https://bugzilla.redhat.com/show_bug.cgi?id=753589
                        sudo setsebool virt_use_execmem on
                    fi
                fi
            fi

            # Install and configure **LXC** if specified. LXC is another approach to
            # splitting a system into many smaller parts. LXC uses cgroups and chroot
            # to simulate multiple systems.
            if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
                if is_ubuntu; then
                    if [[ ! "$DISTRO" > natty ]]; then
                        cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0"
                        sudo mkdir -p /cgroup
                        if ! grep -q cgroup /etc/fstab; then
                            echo "$cgline" | sudo tee -a /etc/fstab
                        fi
                        if ! mount -n | grep -q cgroup; then
                            sudo mount /cgroup
                        fi
                    fi
                fi
            fi
        fi

        # Prepare directories and packages for baremetal driver
        if is_baremetal; then
            configure_baremetal_nova_dirs
        fi

        if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
            if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then
                # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces
                cat <<EOF | sudo tee -a $QEMU_CONF
cgroup_device_acl = [
    "/dev/null", "/dev/full", "/dev/zero",
    "/dev/random", "/dev/urandom",
    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
    "/dev/rtc", "/dev/hpet","/dev/net/tun",
]
EOF
            fi

            if is_ubuntu; then
                LIBVIRT_DAEMON=libvirt-bin
            else
                LIBVIRT_DAEMON=libvirtd
            fi

            if is_fedora || is_suse; then
                if is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -le "17" ]]; then
                    sudo bash -c "cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
[libvirt Management Access]
Identity=unix-group:$LIBVIRT_GROUP
Action=org.libvirt.unix.manage
ResultAny=yes
ResultInactive=yes
ResultActive=yes
EOF"
                elif is_suse && [[ $os_RELEASE = 12.2 || "$os_VENDOR" = "SUSE LINUX" ]]; then
                    # openSUSE < 12.3 or SLE
                    # Work around the fact that polkit-default-privs overrules pklas
                    # with 'unix-group:$group'.
                    sudo bash -c "cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
[libvirt Management Access]
Identity=unix-user:$USER
Action=org.libvirt.unix.manage
ResultAny=yes
ResultInactive=yes
ResultActive=yes
EOF"
                else
                    # Starting with fedora 18 and opensuse-12.3, enable the stack user to
                    # run ``virsh -c qemu:///system`` by creating a policy-kit rule for
                    # the stack user using the new Javascript syntax
                    rules_dir=/etc/polkit-1/rules.d
                    sudo mkdir -p $rules_dir
                    sudo bash -c "cat <<EOF > $rules_dir/50-libvirt-$STACK_USER.rules
polkit.addRule(function(action, subject) {
    if (action.id == 'org.libvirt.unix.manage' &&
        subject.user == '"$STACK_USER"') {
        return polkit.Result.YES;
    }
});
EOF"
                    unset rules_dir
                fi
            fi

            # The user that nova runs as needs to be a member of the **libvirtd**
            # group, otherwise nova-compute will be unable to use libvirt.
            if ! getent group $LIBVIRT_GROUP >/dev/null; then
                sudo groupadd $LIBVIRT_GROUP
            fi
            add_user_to_group $STACK_USER $LIBVIRT_GROUP

            # libvirt detects various settings on startup; since we may have changed
            # the system configuration (modules, filesystems), restart libvirt so it
            # picks up those changes.
            restart_service $LIBVIRT_DAEMON
        fi

        # Instance Storage
        # ----------------

        # Nova stores each instance in its own directory.
        sudo mkdir -p $NOVA_INSTANCES_PATH
        sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH

        # You can specify a different disk to be mounted and used for backing the
        # virtual machines. If there is a partition labeled nova-instances we
        # mount it (ext filesystems can be labeled via e2label).
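        # For example, to label an existing ext partition (device name is
        # illustrative):
        #   sudo e2label /dev/sdb1 nova-instances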
        if [ -L /dev/disk/by-label/nova-instances ]; then
            if ! mount -n | grep -q $NOVA_INSTANCES_PATH; then
                sudo mount -L nova-instances $NOVA_INSTANCES_PATH
                sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH
            fi
        fi
    fi
}

# create_nova_accounts() - Set up common required nova accounts
#
# Tenant               User       Roles
# ------------------------------------------------------------------
# service              nova       admin, [ResellerAdmin (swift only)]
#
# Migrated from keystone_data.sh
create_nova_accounts() {

    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")

    # Nova
    if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
        NOVA_USER=$(keystone user-create \
            --name=nova \
            --pass="$SERVICE_PASSWORD" \
            --tenant_id $SERVICE_TENANT \
            --email=nova@example.com \
            | grep " id " | get_field 2)
        keystone user-role-add \
            --tenant_id $SERVICE_TENANT \
            --user_id $NOVA_USER \
            --role_id $ADMIN_ROLE
        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
            NOVA_SERVICE=$(keystone service-create \
                --name=nova \
                --type=compute \
                --description="Nova Compute Service" \
                | grep " id " | get_field 2)
            keystone endpoint-create \
                --region RegionOne \
                --service_id $NOVA_SERVICE \
                --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
                --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
                --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s"
            NOVA_V3_SERVICE=$(keystone service-create \
                --name=nova \
                --type=computev3 \
                --description="Nova Compute Service V3" \
                | grep " id " | get_field 2)
            keystone endpoint-create \
                --region RegionOne \
                --service_id $NOVA_V3_SERVICE \
                --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \
                --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \
                --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3"
        fi
    fi
}
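
# With the defaults above, the registered compute endpoint looks like
# ``http://$SERVICE_HOST:8774/v2/$(tenant_id)s``; Keystone substitutes
# ``$(tenant_id)s`` with the caller's tenant at request time.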

# create_nova_conf() - Create a new ``nova.conf`` file
function create_nova_conf() {
    # Remove legacy ``nova.conf``
    rm -f $NOVA_DIR/bin/nova.conf

    # (Re)create ``nova.conf``
    rm -f $NOVA_CONF
    iniset $NOVA_CONF DEFAULT verbose "True"
    iniset $NOVA_CONF DEFAULT debug "True"
    iniset $NOVA_CONF DEFAULT auth_strategy "keystone"
    iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True"
    iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI"
    iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf"
    iniset $NOVA_CONF DEFAULT compute_scheduler_driver "$SCHEDULER"
    iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF"
    iniset $NOVA_CONF DEFAULT force_dhcp_release "True"
    iniset $NOVA_CONF DEFAULT fixed_range ""
    iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME"
    iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST"
    iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT"
    iniset $NOVA_CONF DEFAULT osapi_compute_extension "nova.api.openstack.compute.contrib.standard_extensions"
    iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP"
    iniset $NOVA_CONF DEFAULT sql_connection `database_connection_url nova`
    if is_baremetal; then
        iniset $NOVA_CONF baremetal sql_connection `database_connection_url nova_bm`
    fi
    if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
        iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE"
        iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none"
    fi
    iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
    iniset $NOVA_CONF osapi_v3 enabled "True"

    if is_fedora; then
        # nova defaults to /usr/local/bin, but fedora pip likes to
        # install things in /usr/bin
        iniset $NOVA_CONF DEFAULT bindir "/usr/bin"
    fi

    if is_service_enabled n-api; then
        iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS"
        if is_service_enabled tls-proxy; then
            # Set the service port for a proxy to take the original
            iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT"
        fi
    fi
    if is_service_enabled cinder; then
        iniset $NOVA_CONF DEFAULT volume_api_class "nova.volume.cinder.API"
    fi
    if [ -n "$NOVA_STATE_PATH" ]; then
        iniset $NOVA_CONF DEFAULT state_path "$NOVA_STATE_PATH"
        iniset $NOVA_CONF DEFAULT lock_path "$NOVA_STATE_PATH"
    fi
    if [ -n "$NOVA_INSTANCES_PATH" ]; then
        iniset $NOVA_CONF DEFAULT instances_path "$NOVA_INSTANCES_PATH"
    fi
    if [ "$MULTI_HOST" != "False" ]; then
        iniset $NOVA_CONF DEFAULT multi_host "True"
        iniset $NOVA_CONF DEFAULT send_arp_for_ha "True"
    fi
    if [ "$SYSLOG" != "False" ]; then
        iniset $NOVA_CONF DEFAULT use_syslog "True"
    fi
    if [ "$API_RATE_LIMIT" != "True" ]; then
        iniset $NOVA_CONF DEFAULT api_rate_limit "False"
    fi
    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
        # Add color to logging output
        iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s"
        iniset $NOVA_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s"
        iniset $NOVA_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d"
        iniset $NOVA_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s"
    else
        # Show user_name and project_name instead of user_id and project_id
        iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
    fi
    if is_service_enabled ceilometer; then
        iniset $NOVA_CONF DEFAULT instance_usage_audit "True"
        iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour"
        iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state"
        iniset $NOVA_CONF DEFAULT notify_on_any_change "True"
        iniset_multiline $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" "ceilometer.compute.nova_notifier"
    fi

    # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS``
    if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then
        EXTRA_OPTS=$EXTRA_FLAGS
    fi

    # Define extra nova conf flags by defining the array ``EXTRA_OPTS``.
    # For example: ``EXTRA_OPTS=(foo=true bar=2)``
    for I in "${EXTRA_OPTS[@]}"; do
        # Replace the first '=' with ' ' for iniset syntax
        iniset $NOVA_CONF DEFAULT ${I/=/ }
    done
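
    # For illustration, ``EXTRA_OPTS=(foo=true)`` expands to
    #   iniset $NOVA_CONF DEFAULT foo true
    # i.e. ``foo = true`` in the ``[DEFAULT]`` section of ``$NOVA_CONF``.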

    # All nova-compute workers need to know the vnc configuration options
    # These settings don't hurt anything if n-xvnc and n-novnc are disabled
    if is_service_enabled n-cpu; then
        NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
        iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL"
        XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
        iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL"
        SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
        iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
    fi
    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
    else
        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
    fi
    if is_service_enabled n-novnc || is_service_enabled n-xvnc; then
        # Address on which instance vncservers will listen on compute hosts.
        # For multi-host, this should be the management ip of the compute host.
        VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
        iniset $NOVA_CONF DEFAULT vnc_enabled true
        iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
        iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
    else
        iniset $NOVA_CONF DEFAULT vnc_enabled false
    fi
    if is_service_enabled n-spice; then
        # Address on which instance spiceservers will listen on compute hosts.
        # For multi-host, this should be the management ip of the compute host.
        SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
        SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1}
        iniset $NOVA_CONF spice enabled true
        iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
        iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
    else
        iniset $NOVA_CONF spice enabled false
    fi
    iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
    iniset_rpc_backend nova $NOVA_CONF DEFAULT
    iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT"
}
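
# A rough sketch of the resulting ``$NOVA_CONF`` under the default settings
# (actual values depend on the environment):
#   [DEFAULT]
#   verbose = True
#   debug = True
#   auth_strategy = keystone
#   allow_resize_to_same_host = True
#   rootwrap_config = /etc/nova/rootwrap.conf
#   ...
#   [osapi_v3]
#   enabled = True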

function init_nova_cells() {
    if is_service_enabled n-cell; then
        cp $NOVA_CONF $NOVA_CELLS_CONF
        iniset $NOVA_CELLS_CONF DEFAULT sql_connection `database_connection_url $NOVA_CELLS_DB`
        iniset $NOVA_CELLS_CONF DEFAULT rabbit_virtual_host child_cell
        iniset $NOVA_CELLS_CONF DEFAULT dhcpbridge_flagfile $NOVA_CELLS_CONF
        iniset $NOVA_CELLS_CONF cells enable True
        iniset $NOVA_CELLS_CONF cells name child

        iniset $NOVA_CONF DEFAULT scheduler_topic cells
        iniset $NOVA_CONF DEFAULT compute_api_class nova.compute.cells_api.ComputeCellsAPI
        iniset $NOVA_CONF cells enable True
        iniset $NOVA_CONF cells name region

        if is_service_enabled n-api-meta; then
            NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//")
            iniset $NOVA_CONF DEFAULT enabled_apis $NOVA_ENABLED_APIS
            iniset $NOVA_CELLS_CONF DEFAULT enabled_apis metadata
        fi

        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF db sync
        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF cell create --name=region --cell_type=parent --username=guest --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=/ --woffset=0 --wscale=1
        $NOVA_BIN_DIR/nova-manage cell create --name=child --cell_type=child --username=guest --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=child_cell --woffset=0 --wscale=1
    fi
}
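
# Note that the two ``cell create`` calls register the topology in both
# directions: the parent cell ("region") is written to the child cell's
# database (hence ``--config-file $NOVA_CELLS_CONF``), and the child cell is
# written to the parent's (default) database.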

# create_nova_cache_dir() - Part of the init_nova() process
function create_nova_cache_dir() {
    # Create cache dir
    sudo mkdir -p $NOVA_AUTH_CACHE_DIR
    sudo chown $STACK_USER $NOVA_AUTH_CACHE_DIR
    rm -f $NOVA_AUTH_CACHE_DIR/*
}

function create_nova_conf_nova_network() {
    iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER"
    iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE"
    iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE"
    iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE"
    if [ -n "$FLAT_INTERFACE" ]; then
        iniset $NOVA_CONF DEFAULT flat_interface "$FLAT_INTERFACE"
    fi
}

# create_nova_keys_dir() - Part of the init_nova() process
function create_nova_keys_dir() {
    # Create keys dir
    sudo mkdir -p ${NOVA_STATE_PATH}/keys
    sudo chown -R $STACK_USER ${NOVA_STATE_PATH}
}

# init_nova() - Initialize databases, etc.
function init_nova() {
    # All nova components talk to a central database.
    # Only do this step once on the API node for an entire cluster.
    if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then
        # (Re)create nova database
        # Explicitly use latin1 to avoid lp#829209: nova expects the database
        # to use latin1 by default and then upgrades it to utf8 (see
        # 082_essex.py in nova)
        recreate_database nova latin1

        # Migrate nova database
        $NOVA_BIN_DIR/nova-manage db sync

        if is_service_enabled n-cell; then
            recreate_database $NOVA_CELLS_DB latin1
        fi

        # (Re)create nova baremetal database
        if is_baremetal; then
            recreate_database nova_bm latin1
            $NOVA_BIN_DIR/nova-baremetal-manage db sync
        fi
    fi

    create_nova_cache_dir
    create_nova_keys_dir
}

# install_novaclient() - Collect source and prepare
function install_novaclient() {
    git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH
    setup_develop $NOVACLIENT_DIR
}

# install_nova() - Collect source and prepare
function install_nova() {
    if is_service_enabled n-cpu; then
        if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
            if is_ubuntu; then
                install_package kvm
                install_package libvirt-bin
                install_package python-libvirt
            elif is_fedora || is_suse; then
                install_package kvm
                install_package libvirt
                install_package libvirt-python
            else
                exit_distro_not_supported "libvirt installation"
            fi

            # Install and configure **LXC** if specified. LXC is another approach to
            # splitting a system into many smaller parts. LXC uses cgroups and chroot
            # to simulate multiple systems.
            if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
                if is_ubuntu; then
                    if [[ "$DISTRO" > natty ]]; then
                        install_package cgroup-lite
                    fi
                else
                    ### FIXME(dtroyer): figure this out
                    echo "RPM-based cgroup not implemented yet"
                    yum_install libcgroup-tools
                fi
            fi
        fi
    fi

    git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
    setup_develop $NOVA_DIR
}

# start_nova_api() - Start the API process ahead of other things
function start_nova_api() {
    # Get right service port for testing
    local service_port=$NOVA_SERVICE_PORT
    if is_service_enabled tls-proxy; then
        service_port=$NOVA_SERVICE_PORT_INT
    fi

    screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api"
    echo "Waiting for nova-api to start..."
    if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then
        die $LINENO "nova-api did not start"
    fi

    # Start proxies if enabled
    if is_service_enabled tls-proxy; then
        start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT &
    fi
}

# start_nova() - Start running processes, including screen
function start_nova() {
    NOVA_CONF_BOTTOM=$NOVA_CONF

    # ``screen_it`` checks ``is_service_enabled`` itself, so no check is needed here
    screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor"

    if is_service_enabled n-cell; then
        NOVA_CONF_BOTTOM=$NOVA_CELLS_CONF
        screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF"
        screen_it n-cell "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CONF"
        screen_it n-cell "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF"
    fi

    if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
        # The group **$LIBVIRT_GROUP** is added to the current user in this script.
        # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
        screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM'"
    else
        screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM"
    fi
    screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert"
    screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $NOVA_CONF_BOTTOM"
    screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $NOVA_CONF_BOTTOM"
    screen_it n-api-meta "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api-metadata --config-file $NOVA_CONF_BOTTOM"

    screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $NOVA_CONF --web $NOVNC_DIR"
    screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF"
    screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $NOVA_CONF --web $SPICE_DIR"
    screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth"

    # Start nova-objectstore only if the swift3 service is not enabled;
    # otherwise Swift acts as the s3 objectstore.
    is_service_enabled swift3 || \
        screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore"
}

# stop_nova() - Stop running processes (non-screen)
function stop_nova() {
    # Kill the nova screen windows
    # Some services are listed here twice since more than one instance
    # of a service may be running in certain configs.
    for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cond n-cell n-cell n-api-meta; do
        screen -S $SCREEN_NAME -p $serv -X kill
    done
}

# Restore xtrace
$XTRACE

# Local variables:
# mode: shell-script
# End: