Refactor new options into a new config file.

From now on, configuration files for all releases will be available,
which will make it easier to test upgrades that span more than two
versions (e.g. g->h->i).
Notes:
 * from-folsom files will not be necessary since we don't test
folsom->grizzly.
 * from-havana files will be added in a follow-up patch
(change-id Iac520a8de4c3b9f14e2289cfb9b6a50c4ab0ce32).
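
For illustration, the pattern every upgrade-* script now follows (it is
repeated in each of the diffs below):

    # pick from-grizzly, from-havana, ... based on the base branch
    upgrade_dir=$(get_release_name_from_branch $BASE_DEVSTACK_BRANCH)
    # ${0##*/} expands to the running script's name, e.g. upgrade-nova
    source $GRENADE_DIR/"from-"$upgrade_dir/${0##*/}
    # then call the hook the sourced file defines, e.g.:
    configure_nova_upgrade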

Change-Id: I3312732b8c344ce8844a82282a032e383e07e07c
Author: Mauro S. M. Rodrigues
Date:   2013-11-21 18:04:19 +00:00
parent 6c471cb4c5
commit d0654b98c0
19 changed files with 230 additions and 113 deletions

from-grizzly/upgrade-cinder Executable file

@@ -0,0 +1,43 @@
#!/usr/bin/env bash
# ``upgrade-cinder``
function configure_cinder_upgrade(){
# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace
CINDER_POLICY_JSON=$CINDER_CONF_DIR/policy.json
# add rules needed for live volume migration
policy_add ${CINDER_POLICY_JSON} 'volume_extension:volume_admin_actions:migrate_volume_completion' '[["rule:admin_api"]]'
policy_add ${CINDER_POLICY_JSON} 'volume_extension:volume_mig_status_attribute' '[["rule:admin_api"]]'
source $TARGET_DEVSTACK_DIR/openrc admin admin
if [[ "$ENABLED_SERVICES" =~ "c-api" && "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
CINDER_V2_SERVICE=$(keystone service-create \
--name=cinder \
--type=volumev2 \
--description="Cinder Volume Service V2" \
| grep " id " | get_field 2)
keystone endpoint-create \
--region RegionOne \
--service_id $CINDER_V2_SERVICE \
--publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
--adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
--internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s"
fi
if [[ "$KEYSTONE_CATALOG_BACKEND" != 'sql' && -f "$KEYSTONE_CATALOG" ]]; then
stop_keystone
echo -e "
catalog.RegionOne.volumev2.publicURL = http://%SERVICE_HOST%:8776/v2/\$(tenant_id)s
catalog.RegionOne.volumev2.adminURL = http://%SERVICE_HOST%:8776/v2/\$(tenant_id)s
catalog.RegionOne.volumev2.internalURL = http://%SERVICE_HOST%:8776/v2/\$(tenant_id)s
catalog.RegionOne.volumev2.name = Volume Service V2
" >> $KEYSTONE_CATALOG
start_keystone
fi
set +o xtrace
}
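
A sanity check one might run after configure_cinder_upgrade (illustrative
only, not part of this change; assumes the grizzly-era keystone CLI):

    source $TARGET_DEVSTACK_DIR/openrc admin admin
    keystone catalog --service volumev2    # the new v2 endpoints should show up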

from-grizzly/upgrade-devstack Executable file

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# ``upgrade-devstack``
function configure_devstack_upgrade(){
set -o xtrace
# nothing to see here yet
set +o xtrace
}

from-grizzly/upgrade-glance Executable file

@@ -0,0 +1,16 @@
#!/usr/bin/env bash
# ``upgrade-glance``
# configure_glance
function configure_glance_upgrade(){
set -o xtrace
# guarantee we have all rules set in the new release
cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
if [ -n "$GLANCE_SCHEMA_JSON" ]; then
cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON
fi
set +o xtrace
}

from-grizzly/upgrade-keystone Executable file

@@ -0,0 +1,11 @@
#!/usr/bin/env bash
# ``upgrade-keystone``
function configure_keystone_upgrade(){
# configure_keystone
set -o xtrace
# nothing to see here yet
set +o xtrace
}

from-grizzly/upgrade-nova Executable file

@@ -0,0 +1,62 @@
#!/usr/bin/env bash
# ``upgrade-nova``
function configure_nova_upgrade(){
# upgrade rootwrap configs
configure_nova_rootwrap
add_v3_api_to_catalog
#enable nova_v3_api
if ! grep -q "^\[composite:osapi_compute\]" $NOVA_API_PASTE_INI; then
# Add section at the end
echo -e "\n[composite:osapi_compute]\nuse = call:nova.api.openstack.urlmap:urlmap_factory" >> $NOVA_API_PASTE_INI
fi
sed -i -e "/^\[composite:osapi_compute\]/ a\\
/v3: openstack_compute_api_v3
" "$NOVA_API_PASTE_INI"
iniset $NOVA_API_PASTE_INI composite:openstack_compute_api_v3 use call:nova.api.auth:pipeline_factory
iniset $NOVA_API_PASTE_INI composite:openstack_compute_api_v3 noauth "faultwrap sizelimit noauth ratelimit osapi_compute_app_v3"
iniset $NOVA_API_PASTE_INI composite:openstack_compute_api_v3 keystone "faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v3"
iniset $NOVA_API_PASTE_INI composite:openstack_compute_api_v3 keystone_nolimit "faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v3"
iniset $NOVA_API_PASTE_INI app:osapi_compute_app_v3 paste.app_factory nova.api.openstack.compute:APIRouterV3.factory
iniset $NOVA_CONF osapi_v3 enabled True
#add policy for v3 api
sed -i -e "1 a\\
`grep "v3:" $NOVA_DIR/etc/nova/policy.json | sed 's#,#,\\\#g'`
" $NOVA_CONF_DIR/policy.json
}
# add nova v3 endpoint for blueprint nova-v3-api-tests
function add_v3_api_to_catalog(){
source $TARGET_DEVSTACK_DIR/openrc admin admin
if [[ "$ENABLED_SERVICES" =~ "n-api" && "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
NOVA_V3_SERVICE=$(keystone service-create \
--name=nova \
--type=computev3 \
--description="Nova Compute Service V3" \
| grep " id " | get_field 2)
keystone endpoint-create \
--region RegionOne \
--service_id $NOVA_V3_SERVICE \
--publicurl "$SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \
--adminurl "$SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \
--internalurl "$SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3"
fi
if [[ "$KEYSTONE_CATALOG_BACKEND" != 'sql' && -f "$KEYSTONE_CATALOG" ]]; then
stop_keystone
echo -e "
catalog.RegionOne.computev3.publicURL = http://%SERVICE_HOST%:8774/v3
catalog.RegionOne.computev3.adminURL = http://%SERVICE_HOST%:8774/v3
catalog.RegionOne.computev3.internalURL = http://%SERVICE_HOST%:8774/v3
catalog.RegionOne.computev3.name = Compute Service V3
" >> $KEYSTONE_CATALOG
start_keystone
fi
}
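
An illustrative way to verify the v3 wiring (not part of this change;
assumes devstack's iniget helper is loaded and the variables above are set):

    # the composite section should now route /v3 requests
    grep -A 5 "^\[composite:osapi_compute\]" $NOVA_API_PASTE_INI
    # and nova.conf should report the v3 API as enabled
    iniget $NOVA_CONF osapi_v3 enabled    # expect: True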

from-grizzly/upgrade-oslo Executable file

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# ``upgrade-oslo``
function configure_oslo_upgrade(){
set -o xtrace
# nothing to see here yet
set +o xtrace
}

from-grizzly/upgrade-packages Executable file

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
# ``upgrade_packages``
# Upgrade Dependencies
# ====================
# Glance
GLANCE_PIPS=""
# Nova
NOVA_PIPS=""
# Horizon
HORIZON_PIPS=""
# Quantum
QUANTUM_PIPS=""
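
These per-project lists start out empty; a hypothetical pin would look
like this (upgrade-packages aggregates them into PIPS, as the diff
further below shows):

    NOVA_PIPS="SQLAlchemy==0.7.8"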

from-grizzly/upgrade-swift Executable file

@@ -0,0 +1,11 @@
#!/usr/bin/env bash
# ``upgrade-swift``
function configure_swift_upgrade(){
# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace
# nothing to see here yet
set +o xtrace
}

from-grizzly/upgrade-tempest Executable file

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# ``upgrade-tempest``
function configure_tempest_upgrade(){
set -o xtrace
# nothing to see here yet
set +o xtrace
}

@@ -635,6 +635,18 @@ function git_update_tag() {
}
# Returns the OpenStack release name for a given branch name
# For a branch that has not yet been released, it returns master
function get_release_name_from_branch(){
local branch=$1
if [ $branch == "stable/grizzly" ]; then
echo "grizzly"
else
echo "master"
fi
}
# Comment an option in an INI file
# inicomment config-file section option
function inicomment() {
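
Usage of the new helper, for reference (hypothetical invocations):

    get_release_name_from_branch stable/grizzly   # prints "grizzly"
    get_release_name_from_branch master           # prints "master"
    get_release_name_from_branch stable/havana    # also "master" for now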

@@ -81,38 +81,9 @@ install_cinder
install_cinderclient
# configure_cinder
CINDER_POLICY_JSON=$CINDER_CONF_DIR/policy.json
# add rules needed for live volume migration
policy_add ${CINDER_POLICY_JSON} 'volume_extension:volume_admin_actions:migrate_volume_completion' '[["rule:admin_api"]]'
policy_add ${CINDER_POLICY_JSON} 'volume_extension:volume_mig_status_attribute' '[["rule:admin_api"]]'
source $TARGET_DEVSTACK_DIR/openrc admin admin
if [[ "$ENABLED_SERVICES" =~ "c-api" && "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
CINDER_V2_SERVICE=$(keystone service-create \
--name=cinder \
--type=volumev2 \
--description="Cinder Volume Service V2" \
| grep " id " | get_field 2)
keystone endpoint-create \
--region RegionOne \
--service_id $CINDER_V2_SERVICE \
--publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
--adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
--internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s"
fi
if [[ "$KEYSTONE_CATALOG_BACKEND" != 'sql' && -f "$KEYSTONE_CATALOG" ]]; then
stop_keystone
echo -e "
catalog.RegionOne.volumev2.publicURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s
catalog.RegionOne.volumev2.adminURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s
catalog.RegionOne.volumev2.internalURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s
catalog.RegionOne.volumev2.name = Volume Service V2
" >> $KEYSTONE_CATALOG
start_keystone
fi
upgrade_dir=$(get_release_name_from_branch $BASE_DEVSTACK_BRANCH)
source $GRENADE_DIR/"from-"$upgrade_dir/${0##*/}
configure_cinder_upgrade
# Simulate init_cinder()
create_cinder_volume_group

@@ -22,6 +22,9 @@ set -o xtrace
# Upgrade DevStack
# ================
upgrade_dir=$(get_release_name_from_branch $BASE_DEVSTACK_BRANCH)
source $GRENADE_DIR/"from-"$upgrade_dir/${0##*/}
configure_devstack_upgrade
# Preserve accrc files for future usage
cp -a $BASE_DEVSTACK_DIR/accrc $TARGET_DEVSTACK_DIR/accrc

@@ -80,12 +80,10 @@ source $TARGET_DEVSTACK_DIR/lib/glance
install_glanceclient
install_glance
# configure_glance
# guarantee we have all rules set in the new release
cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
if [ -n "$GLANCE_SCHEMA_JSON" ]; then
cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON
fi
# configure_glance()
upgrade_dir=$(get_release_name_from_branch $BASE_DEVSTACK_BRANCH)
source $GRENADE_DIR/"from-"$upgrade_dir/${0##*/}
configure_glance_upgrade
# Simulate init_glance()
create_glance_cache_dir

@@ -76,8 +76,9 @@ source $TARGET_DEVSTACK_DIR/lib/keystone
install_keystoneclient
install_keystone
# configure_keystone
# nothing to see here yet
upgrade_dir=$(get_release_name_from_branch $BASE_DEVSTACK_BRANCH)
source $GRENADE_DIR/"from-"$upgrade_dir/${0##*/}
configure_keystone_upgrade
# Simulate init_keystone()
# Migrate the database

@@ -100,63 +100,9 @@ install_novaclient
install_nova
# configure_nova()
# configure_nova
# upgrade rootwrap configs
configure_nova_rootwrap
# Create nova.conf
# create_nova_conf
#add nova v3 endpoint for blue-print:nova-v3-api-tests
source $TARGET_DEVSTACK_DIR/openrc admin admin
if [[ "$ENABLED_SERVICES" =~ "n-api" && "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
NOVA_V3_SERVICE=$(keystone service-create \
--name=nova \
--type=computev3 \
--description="Nova Compute Service V3" \
| grep " id " | get_field 2)
keystone endpoint-create \
--region RegionOne \
--service_id $NOVA_V3_SERVICE \
--publicurl "$SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \
--adminurl "$SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \
--internalurl "$SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3"
fi
if [[ "$KEYSTONE_CATALOG_BACKEND" != 'sql' && -f "$KEYSTONE_CATALOG" ]]; then
stop_keystone
echo -e "
catalog.RegionOne.computev3.publicURL = http://%SERVICE_HOST%:8774/v3
catalog.RegionOne.computev3.adminURL = http://%SERVICE_HOST%:8774/v3
catalog.RegionOne.computev3.internalURL = http://%SERVICE_HOST%:8774/v3
catalog.RegionOne.computev3.name = Compute Service V3
" >> $KEYSTONE_CATALOG
start_keystone
fi
#enable nova_v3_api
if ! grep -q "^\[composite:osapi_compute\]" $NOVA_API_PASTE_INI; then
# Add section at the end
echo -e "\n[composite:osapi_compute]\nuse = call:nova.api.openstack.urlmap:urlmap_factory" >> $NOVA_API_PASTE_INI
fi
sed -i -e "/^\[composite:osapi_compute\]/ a\\
/v3: openstack_compute_api_v3
" "$NOVA_API_PASTE_INI"
iniset $NOVA_API_PASTE_INI composite:openstack_compute_api_v3 use call:nova.api.auth:pipeline_factory
iniset $NOVA_API_PASTE_INI composite:openstack_compute_api_v3 noauth "faultwrap sizelimit noauth ratelimit osapi_compute_app_v3"
iniset $NOVA_API_PASTE_INI composite:openstack_compute_api_v3 keystone "faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v3"
iniset $NOVA_API_PASTE_INI composite:openstack_compute_api_v3 keystone_nolimit "faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v3"
iniset $NOVA_API_PASTE_INI app:osapi_compute_app_v3 paste.app_factory nova.api.openstack.compute:APIRouterV3.factory
iniset $NOVA_CONF osapi_v3 enabled True
#add policy for v3 api
sed -i -e "1 a\\
`grep "v3:" $NOVA_DIR/etc/nova/policy.json | sed 's#,#,\\\#g'`
" $NOVA_CONF_DIR/policy.json
upgrade_dir=$(get_release_name_from_branch $BASE_DEVSTACK_BRANCH)
source $GRENADE_DIR/"from-"$upgrade_dir/${0##*/}
configure_nova_upgrade
# Simulate init_nova()
create_nova_cache_dir

@@ -33,4 +33,8 @@ source $TARGET_DEVSTACK_DIR/lib/oslo
# will actually use the wrong one.
sudo pip uninstall -y oslo.config
upgrade_dir=$(get_release_name_from_branch $BASE_DEVSTACK_BRANCH)
source $GRENADE_DIR/"from-"$upgrade_dir/${0##*/}
configure_oslo_upgrade
install_oslo

@@ -19,21 +19,9 @@ source $GRENADE_DIR/grenaderc
# For debugging
set -o xtrace
# Upgrade Dependencies
# ====================
# Glance
GLANCE_PIPS=""
# Nova
NOVA_PIPS=""
# Horizon
HORIZON_PIPS=""
# Quantum
QUANTUM_PIPS=""
# Get packages to be updated
upgrade_dir=$(get_release_name_from_branch $BASE_DEVSTACK_BRANCH)
source $GRENADE_DIR/"from-"$upgrade_dir/${0##*/}
# SQLAlchemy is 0.7.8 (cinder, nova)
PIPS="$GLANCE_PIPS $HORIZON_PIPS $NOVA_PIPS $QUANTUM_PIPS"

@@ -79,7 +79,9 @@ install_swift
install_swiftclient
# configure_swift
# nothing to see here yet
upgrade_dir=$(get_release_name_from_branch $BASE_DEVSTACK_BRANCH)
source $GRENADE_DIR/"from-"$upgrade_dir/${0##*/}
configure_swift_upgrade
# Simulate swift_init()

@@ -62,6 +62,10 @@ source $TARGET_DEVSTACK_DIR/lib/tempest
install_tempest
# configure_tempest()
upgrade_dir=$(get_release_name_from_branch $BASE_DEVSTACK_BRANCH)
source $GRENADE_DIR/"from-"$upgrade_dir/${0##*/}
configure_tempest_upgrade
OS_USERNAME=admin
configure_tempest