Support keypair in devstack

Since Trove already supports specifying a Nova keypair when creating an
instance (for management convenience), devstack needs to be changed to
create the management keypair and add its name to the Trove config file.

One extra change in this patch is to use a single config file for the
Trove API, task manager, and conductor services.
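
With the default keypair name from the devstack settings (trove-mgmt), the
effect is roughly equivalent to:

    openstack keypair create --public-key ~/.ssh/id_rsa.pub trove-mgmt
    iniset /etc/trove/trove.conf DEFAULT nova_keypair trove-mgmt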

Change-Id: I1e6c4f4305104815bdf89b31776a4955de61bc89
Story: 2005429
Task: 30463
Lingxian Kong 2019-08-28 16:07:07 +12:00
parent 259e0f6f09
commit 0ef474a70c
12 changed files with 104 additions and 442 deletions

@@ -65,7 +65,6 @@ function create_trove_accounts {
--os-password $SERVICE_PASSWORD \
--os-project-name $SERVICE_PROJECT_NAME
local trove_service=$(get_or_create_service "trove" \
"database" "Trove Service")
get_or_create_endpoint $trove_service \
@@ -187,6 +186,28 @@ function _config_trove_apache_wsgi {
tail_log trove-api /var/log/${APACHE_NAME}/trove-api.log
}
function _config_nova_keypair {
export SSH_DIR=${SSH_DIR:-"$HOME/.ssh"}
if [[ ! -f ${SSH_DIR}/id_rsa.pub ]]; then
mkdir -p ${SSH_DIR}
/usr/bin/ssh-keygen -f ${SSH_DIR}/id_rsa -q -N ""
# This is to allow guest agent ssh into the controller in dev mode.
cat ${SSH_DIR}/id_rsa.pub >> ${SSH_DIR}/authorized_keys
else
# This is to allow guest agent ssh into the controller in dev mode.
cat ${SSH_DIR}/id_rsa.pub >> ${SSH_DIR}/authorized_keys
sort ${SSH_DIR}/authorized_keys | uniq > ${SSH_DIR}/authorized_keys.uniq
mv ${SSH_DIR}/authorized_keys.uniq ${SSH_DIR}/authorized_keys
chmod 600 ${SSH_DIR}/authorized_keys
fi
echo "Creating Trove management keypair ${TROVE_MGMT_KEYPAIR_NAME}"
openstack --os-region-name RegionOne --os-password ${SERVICE_PASSWORD} --os-project-name service --os-username trove \
keypair create --public-key ${SSH_DIR}/id_rsa.pub ${TROVE_MGMT_KEYPAIR_NAME}
iniset $TROVE_CONF DEFAULT nova_keypair ${TROVE_MGMT_KEYPAIR_NAME}
}
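# Sanity check (a sketch, not part of this patch): after stacking, the
# management keypair should be visible to the trove service user, e.g.:
#
#   openstack --os-username trove --os-project-name service \
#       --os-password $SERVICE_PASSWORD keypair show ${TROVE_MGMT_KEYPAIR_NAME}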
# configure_trove() - Set config files, create data dirs, etc
function configure_trove {
setup_develop $TROVE_DIR
@@ -204,40 +225,57 @@ function configure_trove {
# (Re)create trove conf files
rm -f $TROVE_CONF
rm -f $TROVE_TASKMANAGER_CONF
rm -f $TROVE_CONDUCTOR_CONF
TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION
# Set common configuration values (but only if they're defined)
iniset_conditional $TROVE_CONF DEFAULT max_accepted_volume_size $TROVE_MAX_ACCEPTED_VOLUME_SIZE
iniset_conditional $TROVE_CONF DEFAULT max_instances_per_tenant $TROVE_MAX_INSTANCES_PER_TENANT
iniset_conditional $TROVE_CONF DEFAULT max_volumes_per_tenant $TROVE_MAX_VOLUMES_PER_TENANT
iniset_conditional $TROVE_CONF DEFAULT agent_call_low_timeout $TROVE_AGENT_CALL_LOW_TIMEOUT
iniset_conditional $TROVE_CONF DEFAULT agent_call_high_timeout $TROVE_AGENT_CALL_HIGH_TIMEOUT
iniset_conditional $TROVE_CONF DEFAULT resize_time_out $TROVE_RESIZE_TIME_OUT
iniset_conditional $TROVE_CONF DEFAULT usage_timeout $TROVE_USAGE_TIMEOUT
iniset_conditional $TROVE_CONF DEFAULT state_change_wait_time $TROVE_STATE_CHANGE_WAIT_TIME
# For message queue
iniset $TROVE_CONF DEFAULT rpc_backend "rabbit"
iniset $TROVE_CONF DEFAULT control_exchange trove
iniset $TROVE_CONF DEFAULT transport_url rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/
# For database
iniset $TROVE_CONF database connection `database_connection_url trove`
# For logging
setup_trove_logging $TROVE_CONF
iniset $TROVE_CONF DEFAULT trove_api_workers "$API_WORKERS"
configure_keystone_authtoken_middleware $TROVE_CONF trove
iniset $TROVE_CONF DEFAULT taskmanager_manager trove.taskmanager.manager.Manager
iniset $TROVE_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
iniset $TROVE_CONF DEFAULT nova_proxy_admin_user trove
iniset $TROVE_CONF DEFAULT nova_proxy_admin_tenant_name $SERVICE_PROJECT_NAME
iniset $TROVE_CONF DEFAULT nova_proxy_admin_pass $SERVICE_PASSWORD
iniset $TROVE_CONF DEFAULT nova_proxy_admin_user_domain_name default
iniset $TROVE_CONF DEFAULT nova_proxy_admin_project_domain_name default
iniset $TROVE_CONF DEFAULT os_region_name $REGION_NAME
iniset $TROVE_CONF DEFAULT remote_nova_client trove.common.single_tenant_remote.nova_client_trove_admin
iniset $TROVE_CONF DEFAULT remote_cinder_client trove.common.single_tenant_remote.cinder_client_trove_admin
iniset $TROVE_CONF DEFAULT remote_neutron_client trove.common.single_tenant_remote.neutron_client_trove_admin
iniset $TROVE_CONF DEFAULT default_datastore $TROVE_DATASTORE_TYPE
iniset $TROVE_CONF cassandra tcp_ports 22,7000,7001,7199,9042,9160
iniset $TROVE_CONF couchbase tcp_ports 22,8091,8092,4369,11209-11211,21100-21199
iniset $TROVE_CONF couchdb tcp_ports 22,5984
iniset $TROVE_CONF db2 tcp_ports 22,50000
iniset $TROVE_CONF mariadb tcp_ports 22,3306,4444,4567,4568
iniset $TROVE_CONF mongodb tcp_ports 22,2500,27017,27019
iniset $TROVE_CONF mysql tcp_ports 22,3306
iniset $TROVE_CONF percona tcp_ports 22,3306
iniset $TROVE_CONF postgresql tcp_ports 22,5432
iniset $TROVE_CONF pxc tcp_ports 22,3306,4444,4567,4568
iniset $TROVE_CONF redis tcp_ports 22,6379,16379
iniset $TROVE_CONF vertica tcp_ports 22,5433,5434,5444,5450,4803
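# Note (illustrative, not part of this patch): the per-datastore tcp_ports
# values above are what Trove uses to build security group rules for guest
# instances when security groups are enabled.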
# configure apache related files
if [[ "${TROVE_USE_MOD_WSGI}" == "TRUE" ]]; then
@@ -245,60 +283,6 @@ function configure_trove {
_config_trove_apache_wsgi
fi
# (Re)create trove taskmanager conf file if needed
if is_service_enabled tr-tmgr; then
# Use these values only if they're set
iniset_conditional $TROVE_TASKMANAGER_CONF DEFAULT agent_call_low_timeout $TROVE_AGENT_CALL_LOW_TIMEOUT
iniset_conditional $TROVE_TASKMANAGER_CONF DEFAULT agent_call_high_timeout $TROVE_AGENT_CALL_HIGH_TIMEOUT
iniset_conditional $TROVE_TASKMANAGER_CONF DEFAULT resize_time_out $TROVE_RESIZE_TIME_OUT
iniset_conditional $TROVE_TASKMANAGER_CONF DEFAULT usage_timeout $TROVE_USAGE_TIMEOUT
iniset_conditional $TROVE_TASKMANAGER_CONF DEFAULT state_change_wait_time $TROVE_STATE_CHANGE_WAIT_TIME
iniset $TROVE_TASKMANAGER_CONF DEFAULT rpc_backend "rabbit"
iniset $TROVE_TASKMANAGER_CONF DEFAULT control_exchange trove
iniset $TROVE_TASKMANAGER_CONF DEFAULT transport_url rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/
iniset $TROVE_TASKMANAGER_CONF database connection `database_connection_url trove`
iniset $TROVE_TASKMANAGER_CONF DEFAULT taskmanager_manager trove.taskmanager.manager.Manager
iniset $TROVE_TASKMANAGER_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_user trove
iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_tenant_name $SERVICE_PROJECT_NAME
iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_pass $SERVICE_PASSWORD
iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_user_domain_name default
iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_project_domain_name default
iniset $TROVE_TASKMANAGER_CONF DEFAULT os_region_name $REGION_NAME
iniset $TROVE_TASKMANAGER_CONF DEFAULT remote_nova_client trove.common.single_tenant_remote.nova_client_trove_admin
iniset $TROVE_TASKMANAGER_CONF DEFAULT remote_cinder_client trove.common.single_tenant_remote.cinder_client_trove_admin
iniset $TROVE_TASKMANAGER_CONF DEFAULT remote_neutron_client trove.common.single_tenant_remote.neutron_client_trove_admin
iniset $TROVE_TASKMANAGER_CONF cassandra tcp_ports 22,7000,7001,7199,9042,9160
iniset $TROVE_TASKMANAGER_CONF couchbase tcp_ports 22,8091,8092,4369,11209-11211,21100-21199
iniset $TROVE_TASKMANAGER_CONF couchdb tcp_ports 22,5984
iniset $TROVE_TASKMANAGER_CONF db2 tcp_ports 22,50000
iniset $TROVE_TASKMANAGER_CONF mariadb tcp_ports 22,3306,4444,4567,4568
iniset $TROVE_TASKMANAGER_CONF mongodb tcp_ports 22,2500,27017,27019
iniset $TROVE_TASKMANAGER_CONF mysql tcp_ports 22,3306
iniset $TROVE_TASKMANAGER_CONF percona tcp_ports 22,3306
iniset $TROVE_TASKMANAGER_CONF postgresql tcp_ports 22,5432
iniset $TROVE_TASKMANAGER_CONF pxc tcp_ports 22,3306,4444,4567,4568
iniset $TROVE_TASKMANAGER_CONF redis tcp_ports 22,6379,16379
iniset $TROVE_TASKMANAGER_CONF vertica tcp_ports 22,5433,5434,5444,5450,4803
setup_trove_logging $TROVE_TASKMANAGER_CONF
fi
# (Re)create trove conductor conf file if needed
if is_service_enabled tr-cond; then
iniset $TROVE_CONDUCTOR_CONF DEFAULT rpc_backend "rabbit"
iniset $TROVE_CONDUCTOR_CONF DEFAULT transport_url rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/
iniset $TROVE_CONDUCTOR_CONF database connection `database_connection_url trove`
iniset $TROVE_CONDUCTOR_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
iniset $TROVE_CONDUCTOR_CONF DEFAULT control_exchange trove
setup_trove_logging $TROVE_CONDUCTOR_CONF
fi
# Use these values only if they're set
iniset_conditional $TROVE_GUESTAGENT_CONF DEFAULT state_change_wait_time $TROVE_STATE_CHANGE_WAIT_TIME
iniset_conditional $TROVE_GUESTAGENT_CONF DEFAULT command_process_timeout $TROVE_COMMAND_PROCESS_TIMEOUT
@@ -306,14 +290,21 @@ function configure_trove {
# Set up Guest Agent conf
iniset $TROVE_GUESTAGENT_CONF DEFAULT rpc_backend "rabbit"
iniset $TROVE_GUESTAGENT_CONF DEFAULT transport_url rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$TROVE_HOST_GATEWAY:5672/
iniset $TROVE_GUESTAGENT_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
iniset $TROVE_GUESTAGENT_CONF DEFAULT control_exchange trove
iniset $TROVE_GUESTAGENT_CONF DEFAULT ignore_users os_admin
iniset $TROVE_GUESTAGENT_CONF DEFAULT log_dir /var/log/trove/
iniset $TROVE_GUESTAGENT_CONF DEFAULT log_file trove-guestagent.log
setup_trove_logging $TROVE_GUESTAGENT_CONF
# To avoid a 'Connection timed out' error from sudo inside the guest agent
# (the guest's hostname must be resolvable via /etc/hosts)
CLOUDINIT_PATH=/etc/trove/cloudinit/${TROVE_DATASTORE_TYPE}.cloudinit
sudo mkdir -p $(dirname "$CLOUDINIT_PATH")
sudo touch "$CLOUDINIT_PATH"
sudo tee $CLOUDINIT_PATH >/dev/null <<'EOF'
#cloud-config
manage_etc_hosts: "localhost"
EOF
}
# install_trove() - Collect source and prepare
@@ -532,12 +523,6 @@ function finalize_trove_network {
iniset $TROVE_CONF DEFAULT black_list_regex ""
iniset $TROVE_CONF DEFAULT management_networks ${mgmt_net_id}
iniset $TROVE_CONF DEFAULT network_driver trove.network.neutron.NeutronDriver
iniset $TROVE_TASKMANAGER_CONF DEFAULT network_label_regex ${PRIVATE_NETWORK_NAME}
iniset $TROVE_TASKMANAGER_CONF DEFAULT ip_regex ""
iniset $TROVE_TASKMANAGER_CONF DEFAULT black_list_regex ""
iniset $TROVE_TASKMANAGER_CONF DEFAULT management_networks ${mgmt_net_id}
iniset $TROVE_TASKMANAGER_CONF DEFAULT network_driver trove.network.neutron.NeutronDriver
}
# start_trove() - Start running processes, including screen
@@ -549,8 +534,8 @@ function start_trove {
else
run_process tr-api "$TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF --debug"
fi
run_process tr-tmgr "$TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF --debug"
run_process tr-cond "$TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF --debug"
}
# stop_trove() - Stop running processes
@@ -579,6 +564,7 @@ function configure_tempest_for_trove {
# _setup_minimal_image() - build and register in Trove a vm image with mysql
# - datastore can be set via env variables
# (lxkong): This function is deprecated in favor of the trovestack script.
function _setup_minimal_image {
##### Prerequisites:
##### - SSH keys have to be created on the controller
@@ -718,13 +704,12 @@ if is_service_enabled trove; then
install_trove
install_python_troveclient
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
echo_summary "Configuring Trove"
configure_trove
if is_service_enabled key; then
create_trove_accounts
fi
echo_summary "Configuring Trove"
configure_trove
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
# Initialize trove
init_trove
@@ -739,6 +724,8 @@ if is_service_enabled trove; then
echo "finalize_trove_network: Neutron is not enabled. Nothing to do."
fi
_config_nova_keypair
# Start the trove API and trove taskmgr components
echo_summary "Starting Trove"
start_trove

@@ -21,8 +21,6 @@ TRIPLEO_IMAGES_BRANCH=${TRIPLEO_IMAGES_BRANCH:-master}
# Set up configuration directory and files
TROVE_CONF_DIR=${TROVE_CONF_DIR:-/etc/trove}
TROVE_CONF=${TROVE_CONF:-${TROVE_CONF_DIR}/trove.conf}
TROVE_TASKMANAGER_CONF=${TROVE_TASKMANAGER_CONF:-${TROVE_CONF_DIR}/trove-taskmanager.conf}
TROVE_CONDUCTOR_CONF=${TROVE_CONDUCTOR_CONF:-${TROVE_CONF_DIR}/trove-conductor.conf}
TROVE_GUESTAGENT_CONF=${TROVE_GUESTAGENT_CONF:-${TROVE_CONF_DIR}/trove-guestagent.conf}
TROVE_API_PASTE_INI=${TROVE_API_PASTE_INI:-${TROVE_CONF_DIR}/api-paste.ini}
@@ -60,6 +58,7 @@ else
TROVE_HOST_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
fi
TROVE_SHARE_NETWORKS=$(trueorfalse TRUE TROVE_SHARE_NETWORKS)
TROVE_MGMT_KEYPAIR_NAME=${TROVE_MGMT_KEYPAIR_NAME:-"trove-mgmt"}
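# Like any other devstack setting, this can be overridden in local.conf, e.g.:
#   TROVE_MGMT_KEYPAIR_NAME=my-trove-mgmt-key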
# Support entry points installation of console scripts
if [[ -d $TROVE_DIR/bin ]]; then

@@ -229,9 +229,7 @@ we inspect the configuration parameters and disable secure RPC
messaging by adding this line into the configuration files::
amrith@amrith-work:/etc/trove$ grep enable_secure_rpc_messaging *.conf
trove-conductor.conf:enable_secure_rpc_messaging = False
trove.conf:enable_secure_rpc_messaging = False
trove-taskmanager.conf:enable_secure_rpc_messaging = False
The first thing we observe is that heartbeat messages from the
existing instance are still properly handled by the conductor and the
@@ -313,9 +311,7 @@ The configuration file for this instance is::
We can now shut down the control plane again and enable the secure RPC
capability. Observe that we've just commented out the lines (below)::
trove-conductor.conf:# enable_secure_rpc_messaging = False
trove.conf:# enable_secure_rpc_messaging = False
trove-taskmanager.conf:# enable_secure_rpc_messaging = False
And create another database instance::

@@ -46,7 +46,7 @@ performing operations on the Database instance.
* A service that listens on a RabbitMQ topic
* Entry point - trove/bin/trove-taskmanager
* Runs as a RpcService configured by
etc/trove/trove.conf.sample which defines
trove.taskmanager.manager.Manager as the manager - basically this is
the entry point for requests arriving through the queue
* As described above, requests for this component are pushed to MQ
@@ -109,7 +109,7 @@ bus and performs the relevant operation.
"trove-conductor".
* Entry point - trove/bin/trove-conductor
* Runs as RpcService configured by
etc/trove/trove.conf.sample which defines
trove.conductor.manager.Manager as the manager. This is the entry
point for requests arriving on the queue.
* As guestagent above, requests are pushed to MQ from another component

@@ -1,5 +1,4 @@
2. In the ``/etc/trove`` directory, edit the ``trove.conf`` file and
complete the following steps:
* Provide appropriate values for the following settings:
@@ -56,7 +55,7 @@
username = trove
password = TROVE_PASS
5. Edit the ``trove.conf`` file so it includes the required
settings to connect to the OpenStack Compute service as shown below.
Include ConfigDrive settings so that configuration is injected
into the Guest VM. And finally, if using Nova Network, set the

@@ -195,9 +195,7 @@ Prepare Trove configuration files
There are several configuration files for Trove:
- api-paste.ini and trove.conf — For trove-api service
- trove-taskmanager.conf — For trove-taskmanager service
- trove-guestagent.conf — For trove-guestagent service
- trove-conductor.conf — For trove-conductor service
- <datastore_manager>.cloudinit — Userdata for VMs during provisioning
Cloud-init scripts are userdata used for the different datastore types (mysql/percona, cassandra, mongodb, redis, couchbase) when provisioning new compute instances.
@@ -349,19 +347,19 @@ Run trove-api:
.. code-block:: bash
$ trove-api --config-file=${TROVE_CONF_DIR}/trove-api.conf &
$ trove-api --config-file=${TROVE_CONF_DIR}/trove.conf &
Run trove-taskmanager:
.. code-block:: bash
$ trove-taskmanager --config-file=${TROVE_CONF_DIR}/trove-taskamanger.conf &
$ trove-taskmanager --config-file=${TROVE_CONF_DIR}/trove.conf &
Run trove-conductor:
.. code-block:: bash
$ trove-conductor --config-file=${TROVE_CONF_DIR}/trove-conductor.conf &
$ trove-conductor --config-file=${TROVE_CONF_DIR}/trove.conf &
=================
Trove interaction

@@ -1,58 +0,0 @@
[DEFAULT]
debug = True
trove_auth_url = http://0.0.0.0/identity/v2.0
# The manager class to use for conductor. (string value)
conductor_manager = trove.conductor.manager.Manager
#===================== RPC Configuration =================================
# URL representing the messaging driver to use and its full configuration.
# If not set, we fall back to the 'rpc_backend' option and driver specific
# configuration.
#transport_url=<None>
# The messaging driver to use. Options include rabbit, qpid and zmq.
# Default is rabbit. (string value)
#rpc_backend=rabbit
# The default exchange under which topics are scoped. May be
# overridden by an exchange name specified in the 'transport_url' option.
control_exchange = trove
[profiler]
# If False fully disable profiling feature.
#enabled = False
# If False doesn't trace SQL requests.
#trace_sqlalchemy = True
[database]
connection = mysql+pymysql://root:e1a2c042c828d3566d0a@localhost/trove
[oslo_messaging_notifications]
#
# From oslo.messaging
#
# The Driver(s) to handle sending notifications. Possible
# values are messaging, messagingv2, routing, log, test, noop
# (multi valued)
# Deprecated group/name - [DEFAULT]/notification_driver
#driver =
# A URL representing the messaging driver to use for
# notifications. If not set, we fall back to the same
# configuration used for RPC. (string value)
# Deprecated group/name - [DEFAULT]/notification_transport_url
#transport_url = <None>
# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
# Deprecated group/name - [DEFAULT]/notification_topics
#topics = notifications
# The maximum number of attempts to re-send a notification
# message which failed to be delivered due to a recoverable
# error. 0 - No retry, -1 - indefinite (integer value)
#retry = -1

@@ -1,248 +0,0 @@
[DEFAULT]
# Show debugging output in logs (sets DEBUG log level output)
debug = True
# Update the service and instance statuses if the instances fails to become
# active within the configured usage_timeout.
# usage_timeout = 600
# restore_usage_timeout = 36000
update_status_on_fail = True
#================= RPC Configuration ================================
# URL representing the messaging driver to use and its full configuration.
# If not set, we fall back to the 'rpc_backend' option and driver specific
# configuration.
#transport_url=<None>
# The messaging driver to use. Options include rabbit, qpid and zmq.
# Default is rabbit. (string value)
#rpc_backend=rabbit
# The default exchange under which topics are scoped. May be
# overridden by an exchange name specified in the 'transport_url' option.
control_exchange = trove
#DB Api Implementation
db_api_implementation = trove.db.sqlalchemy.api
# Configuration options for talking to nova via the novaclient.
trove_auth_url = http://0.0.0.0/identity/v2.0
#nova_compute_url = http://localhost:8774/v2
#cinder_url = http://localhost:8776/v1
#swift_url = http://localhost:8080/v1/AUTH_
#neutron_url = http://localhost:9696/
# nova_compute_url, cinder_url, swift_url, and neutron_url can all be fetched
# from Keystone. To fetch from Keystone, comment out nova_compute_url,
# cinder_url, swift_url, and neutron_url, and optionally uncomment the lines below.
# Region name of this node. Used when searching catalog. Default value is None.
#os_region_name = RegionOne
# Service type to use when searching catalog.
#nova_compute_service_type = compute
# Service type to use when searching catalog.
#cinder_service_type = volumev2
# Service type to use when searching catalog.
#swift_service_type = object-store
# Service type to use when searching catalog.
#neutron_service_type = network
# Config options for enabling volume service
trove_volume_support = True
block_device_mapping = vdb
device_path = /dev/vdb
mount_point = /var/lib/mysql
volume_time_out=30
server_delete_time_out=480
# Nova server boot options
# sets the --config-drive argument when doing a nova boot
# (controls how file injection is handled by nova)
use_nova_server_config_drive = True
# Configuration options for talking to nova via the novaclient.
# These options are for an admin user in your keystone config.
# It proxies the token received from the user to send to nova via this admin
# user's creds, basically acting like the client via that proxy token.
nova_proxy_admin_user = admin
nova_proxy_admin_pass = 3de4922d8b6ac5a1aad9
nova_proxy_admin_tenant_id =
# Manager impl for the taskmanager
taskmanager_manager=trove.taskmanager.manager.Manager
# Manager sends Exists Notifications
exists_notification_transformer = trove.extensions.mgmt.instances.models.NovaNotificationTransformer
notification_service_id = mysql:2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b
# Trove DNS
trove_dns_support = False
dns_account_id = 123456
dns_auth_url = http://127.0.0.1/identity/v2.0
dns_username = user
dns_passkey = password
dns_ttl = 3600
dns_domain_name = 'trove.com.'
dns_domain_id = 11111111-1111-1111-1111-111111111111
dns_driver = trove.dns.designate.driver.DesignateDriver
dns_instance_entry_factory = trove.dns.designate.driver.DesignateInstanceEntryFactory
dns_endpoint_url = http://127.0.0.1/v1/
dns_service_type = dns
# Neutron
network_driver = trove.network.nova.NovaNetwork
management_networks =
# Trove Security Groups for Instances
trove_security_groups_support = True
trove_security_group_rule_cidr = 0.0.0.0/0
# Guest related conf
agent_heartbeat_time = 10
agent_call_low_timeout = 5
agent_call_high_timeout = 150
agent_replication_snapshot_timeout = 36000
# Config option for filtering the IP address that DNS uses
# For nova-network, set this to the appropriate network label defined in nova
# For neutron, set this to .* since users can specify custom network labels
# You can also optionally specify regex'es to match the actual IP addresses
# ip_regex (white-list) is applied before black_list_regex in the filter chain
network_label_regex = ^private$
#ip_regex = ^(15.|123.)
#black_list_regex = ^(10.0.0.)
# Datastore templates
template_path = /etc/trove/templates/
# ============ Notification System configuration ===========================
# Sets the notification driver used by oslo.messaging. Options include
# messaging, messagingv2, log and routing. Default is 'noop'
# notification_driver=noop
# Topics used for OpenStack notifications, list value. Default is 'notifications'.
# notification_topics=notifications
# ============ Logging information =============================
#log_dir = /integration/report
#log_file = trove-taskmanager.log
# ============ PyDev remote debugging =============================
# Enable or disable pydev remote debugging.
# There are three values allowed: 'disabled', 'enabled' and 'auto'
# If value is 'auto' tries to connect to remote debugger server,
# but in case of error continue running with disabled debugging
pydev_debug = disabled
# remote debug server host and port options
#pydev_debug_host = localhost
#pydev_debug_port = 5678
# path to pydevd library. It will be used if pydevd is absent in sys.path
#pydev_path = <path>
# ================= Guestagent related ========================
#guest_config = /etc/trove/trove-guestagent.conf
# Use 'guest_info = /etc/guest_info' for pre-Kilo compatibility
#guest_info = guest_info.conf
# Use 'injected_config_location = /etc/trove' for pre-Kilo compatibility
#injected_config_location = /etc/trove/conf.d
#cloudinit_location = /etc/trove/cloudinit
[database]
# SQLAlchemy connection string for the reference implementation
# registry server. Any valid SQLAlchemy connection string is fine.
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
connection = mysql+pymysql://root:e1a2c042c828d3566d0a@localhost/trove
# connection = mysql+pymysql://root:root@localhost/trove
# Period in seconds after which SQLAlchemy should reestablish its connection
# to the database.
#
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
# before MySQL can drop the connection.
idle_timeout = 3600
# ================= Security groups related ========================
# Each future datastore implementation should implement
# its own oslo group with the following defined in it:
# - tcp_ports; udp_ports;
[profiler]
# If False fully disable profiling feature.
#enabled = False
# If False doesn't trace SQL requests.
#trace_sqlalchemy = True
[oslo_messaging_notifications]
#
# From oslo.messaging
#
# The Driver(s) to handle sending notifications. Possible
# values are messaging, messagingv2, routing, log, test, noop
# (multi valued)
# Deprecated group/name - [DEFAULT]/notification_driver
#driver =
# A URL representing the messaging driver to use for
# notifications. If not set, we fall back to the same
# configuration used for RPC. (string value)
# Deprecated group/name - [DEFAULT]/notification_transport_url
#transport_url = <None>
# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
# Deprecated group/name - [DEFAULT]/notification_topics
#topics = notifications
# The maximum number of attempts to re-send a notification
# message which failed to be delivered due to a recoverable
# error. 0 - No retry, -1 - indefinite (integer value)
#retry = -1
[mysql]
# Whether to permit ICMP. default is False.
icmp = True
# Format (single port or port range): A, B-C
# where C greater than B
tcp_ports = 3306
volume_support = True
device_path = /dev/vdb
[redis]
# Format (single port or port range): A, B-C
# where C greater than B
tcp_ports = 6379, 16379
volume_support = True
device_path = /dev/vdb
[cassandra]
tcp_ports = 7000, 7001, 9042, 9160
volume_support = True
device_path = /dev/vdb
[couchbase]
tcp_ports = 8091, 8092, 4369, 11209-11211, 21100-21199
volume_support = True
device_path = /dev/vdb
[mongodb]
volume_support = True
device_path = /dev/vdb
[vertica]
tcp_ports = 5433, 5434, 22, 5444, 5450, 4803
udp_ports = 5433, 4803, 4804, 6453
volume_support = True
device_path = /dev/vdb
mount_point = /var/lib/vertica
taskmanager_strategy = trove.common.strategies.cluster.experimental.vertica.taskmanager.VerticaTaskManagerStrategy

@@ -12,20 +12,16 @@ set -o xtrace
source $_LIB/die
[ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set"
[ -n "${HOST_USERNAME}" ] || die "HOST_USERNAME needs to be set to the user for the current user on the host"
if [ `whoami` = "root" ]; then
die "This should not be run as root"
fi
# copy files over the "staging" area for the guest image (they'll later be put in the correct location by the guest user
# not these keys should not be overridden otherwise a) you won't be able to ssh in and b) the guest won't be able to
# rsync the files
if [ -e ${SSH_DIR}/authorized_keys ]; then
sudo -Hiu ${HOST_USERNAME} dd if=${SSH_DIR}/authorized_keys of=${TMP_HOOKS_PATH}/ssh-authorized-keys
# Guest agent needs to ssh into the controller to download code in dev mode.
if [ -e ${SSH_DIR}/id_rsa ]; then
sudo -Hiu ${HOST_USERNAME} dd if=${SSH_DIR}/id_rsa of=${TMP_HOOKS_PATH}/id_rsa
sudo -Hiu ${HOST_USERNAME} dd if=${SSH_DIR}/id_rsa.pub of=${TMP_HOOKS_PATH}/id_rsa.pub
else
die "SSH Authorized Keys file must exist along with pub and private key"
die "SSH keys must exist"
fi

@@ -9,19 +9,16 @@ set -o xtrace
SSH_DIR="/home/${GUEST_USERNAME}/.ssh"
TMP_HOOKS_DIR="/tmp/in_target.d"
if [ -e "${TMP_HOOKS_DIR}/ssh-authorized-keys" ]; then
if [ ! -e ${SSH_DIR} ]; then
# this method worked more reliable in vmware fusion over doing sudo -Hiu ${GUEST_USERNAME}
mkdir ${SSH_DIR}
chown ${GUEST_USERNAME}:${GUEST_USERNAME} ${SSH_DIR}
fi
sudo -Hiu ${GUEST_USERNAME} dd of=${SSH_DIR}/authorized_keys conv=notrunc if=${TMP_HOOKS_DIR}/ssh-authorized-keys
if [ ! -e "${SSH_DIR}/id_rsa" ]; then
sudo -Hiu ${GUEST_USERNAME} dd of=${SSH_DIR}/id_rsa if=${TMP_HOOKS_DIR}/id_rsa
# perms have to be right on this file for ssh to work
sudo -Hiu ${GUEST_USERNAME} chmod 600 ${SSH_DIR}/id_rsa
sudo -Hiu ${GUEST_USERNAME} dd of=${SSH_DIR}/id_rsa.pub if=${TMP_HOOKS_DIR}/id_rsa.pub
fi
if [ ! -e ${SSH_DIR} ]; then
# this method worked more reliably in VMware Fusion than doing sudo -Hiu ${GUEST_USERNAME}
mkdir ${SSH_DIR}
chown ${GUEST_USERNAME}:${GUEST_USERNAME} ${SSH_DIR}
fi
if [ -e "${TMP_HOOKS_DIR}/id_rsa" ]; then
sudo -Hiu ${GUEST_USERNAME} dd of=${SSH_DIR}/id_rsa.pub if=${TMP_HOOKS_DIR}/id_rsa.pub
sudo -Hiu ${GUEST_USERNAME} dd of=${SSH_DIR}/id_rsa if=${TMP_HOOKS_DIR}/id_rsa
sudo -Hiu ${GUEST_USERNAME} chmod 600 ${SSH_DIR}/id_rsa
else
echo "SSH Keys were not staged by host"
exit -1

@@ -125,9 +125,7 @@ function clean_instances() {
for i in $LIST; do sudo virsh destroy $i; done
}
# In dev mode, guest agent needs to ssh into the controller to download code.
function manage_ssh_keys() {
if [ -d ${SSH_DIR} ]; then
echo "${SSH_DIR} already exists"

@@ -79,8 +79,6 @@ export RELEASE=${RELEASE:-$DISTRO_RELEASE}
# Set up variables for the CONF files - this has to happen after loading trovestack.rc, since
# TROVE_CONF_DIR is defined there - these will be used by devstack too
export TROVE_CONF=$TROVE_CONF_DIR/trove.conf
export TROVE_TASKMANAGER_CONF=$TROVE_CONF_DIR/trove-taskmanager.conf
export TROVE_CONDUCTOR_CONF=$TROVE_CONF_DIR/trove-conductor.conf
export TROVE_GUESTAGENT_CONF=$TROVE_CONF_DIR/trove-guestagent.conf
export TROVE_API_PASTE_INI=$TROVE_CONF_DIR/api-paste.ini
export TEST_CONF=$TROVE_CONF_DIR/test.conf