Consume TripleO CI scenario envs from THT

Use the TripleO CI scenario environments shipped in TripleO Heat Templates.

Note: Newton jobs will fail once this patch merges, and will pass again
once https://review.openstack.org/#/c/402119/ is merged and backported
into stable/newton.

Change-Id: I8235fa0648a05c2420c536e388b05e9c2f8fecc1
Depends-On: I3527a64c0c8f56ca77115d32849fa23fe710112d
Emilien Macchi 2016-11-24 11:26:32 -05:00
parent 03bc7d294c
commit 213ec15d3e
8 changed files with 16 additions and 600 deletions
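In short, the scenario deploy environments and pingtest templates move out of the tripleo-ci repository and are consumed from the installed tripleo-heat-templates tree instead. A minimal sketch of the resulting layout, with paths taken from the hunks below; the scenario name is an illustrative example, not a value defined by this commit:

#!/bin/bash
# Illustrative only: where CI now looks for scenario files, per the diffs below.
THT=/usr/share/openstack-tripleo-heat-templates
SCENARIO=scenario001-multinode   # example value of $TOCI_JOBTYPE / $MULTINODE_ENV_NAME

# New home of the deploy-time environment and the pingtest template:
ls "$THT/ci/environments/$SCENARIO.yaml" \
   "$THT/ci/pingtests/$SCENARIO.yaml"

# Old home, removed by this commit:
#   $TRIPLEO_ROOT/tripleo-ci/test-environments/$SCENARIO.yaml
#   /usr/share/tripleo-ci/<pingtest>.yaml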


@@ -759,9 +759,14 @@ function overcloud_pingtest {
     EXTERNAL_NETWORK_GATEWAY=${EXTERNAL_NETWORK_GATEWAY:-"${CTLPLANE_NET}.1"}
     TENANT_STACK_DEPLOY_ARGS=${TENANT_STACK_DEPLOY_ARGS:-""}
     neutron subnet-create --name ext-subnet --allocation-pool start=$FLOATING_IP_START,end=$FLOATING_IP_END --disable-dhcp --gateway $EXTERNAL_NETWORK_GATEWAY nova $FLOATING_IP_CIDR
-    TENANT_PINGTEST_TEMPLATE=/usr/share/tripleo-ci/$PINGTEST_TEMPLATE.yaml
-    if [ ! -e $TENANT_PINGTEST_TEMPLATE ]; then
-        TENANT_PINGTEST_TEMPLATE=$(dirname `readlink -f -- $0`)/../templates/$PINGTEST_TEMPLATE.yaml
+    # pingtest environment for scenarios jobs is in TripleO Heat Templates.
+    if [ -e /usr/share/openstack-tripleo-heat-templates/ci/pingtests/$MULTINODE_ENV_NAME.yaml ]; then
+        TENANT_PINGTEST_TEMPLATE=/usr/share/openstack-tripleo-heat-templates/ci/pingtests/$MULTINODE_ENV_NAME.yaml
+    else
+        TENANT_PINGTEST_TEMPLATE=/usr/share/tripleo-ci/$PINGTEST_TEMPLATE.yaml
+        if [ ! -e $TENANT_PINGTEST_TEMPLATE ]; then
+            TENANT_PINGTEST_TEMPLATE=$(dirname `readlink -f -- $0`)/../templates/$PINGTEST_TEMPLATE.yaml
+        fi
     fi
     log "Overcloud pingtest, creating tenant-stack heat stack:"
     heat stack-create -f $TENANT_PINGTEST_TEMPLATE $TENANT_STACK_DEPLOY_ARGS tenant-stack || exitval=1
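The new lookup is a three-step fallback: prefer the pingtest shipped in THT for the current scenario, then the packaged tripleo-ci template, then the template directory relative to the script. A sketch of that precedence as a standalone helper; the function name is illustrative and not part of tripleo.sh:

#!/bin/bash
# Hypothetical helper mirroring the fallback introduced in the hunk above.
resolve_pingtest_template() {
    local tht_pingtest="/usr/share/openstack-tripleo-heat-templates/ci/pingtests/${MULTINODE_ENV_NAME}.yaml"
    local packaged="/usr/share/tripleo-ci/${PINGTEST_TEMPLATE}.yaml"
    local in_tree="$(dirname "$(readlink -f -- "$0")")/../templates/${PINGTEST_TEMPLATE}.yaml"

    if [ -e "$tht_pingtest" ]; then
        echo "$tht_pingtest"   # scenario jobs: template now lives in THT
    elif [ -e "$packaged" ]; then
        echo "$packaged"       # non-scenario jobs: packaged tripleo-ci template
    else
        echo "$in_tree"        # fallback: template relative to the tripleo-ci checkout
    fi
}

TENANT_PINGTEST_TEMPLATE=$(resolve_pingtest_template)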


@@ -1,174 +0,0 @@
heat_template_version: 2013-05-23
description: >
  HOT template to created resources deployed by scenario001.
parameters:
  key_name:
    type: string
    description: Name of keypair to assign to servers
    default: 'pingtest_key'
  image:
    type: string
    description: Name of image to use for servers
    default: 'pingtest_image'
  public_net_name:
    type: string
    default: 'nova'
    description: >
      ID or name of public network for which floating IP addresses will be allocated
  private_net_name:
    type: string
    description: Name of private network to be created
    default: 'default-net'
  private_net_cidr:
    type: string
    description: Private network address (CIDR notation)
    default: '192.168.2.0/24'
  private_net_gateway:
    type: string
    description: Private network gateway address
    default: '192.168.2.1'
  private_net_pool_start:
    type: string
    description: Start of private network IP address allocation pool
    default: '192.168.2.100'
  private_net_pool_end:
    type: string
    default: '192.168.2.200'
    description: End of private network IP address allocation pool
resources:
  key_pair:
    type: OS::Nova::KeyPair
    properties:
      save_private_key: true
      name: {get_param: key_name }
  private_net:
    type: OS::Neutron::Net
    properties:
      name: { get_param: private_net_name }
  private_subnet:
    type: OS::Neutron::Subnet
    properties:
      network_id: { get_resource: private_net }
      cidr: { get_param: private_net_cidr }
      gateway_ip: { get_param: private_net_gateway }
      allocation_pools:
        - start: { get_param: private_net_pool_start }
          end: { get_param: private_net_pool_end }
  router:
    type: OS::Neutron::Router
    properties:
      external_gateway_info:
        network: { get_param: public_net_name }
  router_interface:
    type: OS::Neutron::RouterInterface
    properties:
      router_id: { get_resource: router }
      subnet_id: { get_resource: private_subnet }
  server1:
    type: OS::Nova::Server
    properties:
      name: Server1
      flavor: { get_resource: test_flavor }
      image: { get_param: image }
      key_name: { get_resource: key_pair }
      networks:
        - port: { get_resource: server1_port }
  server1_port:
    type: OS::Neutron::Port
    properties:
      network_id: { get_resource: private_net }
      fixed_ips:
        - subnet_id: { get_resource: private_subnet }
      security_groups: [{ get_resource: server_security_group }]
  server1_floating_ip:
    type: OS::Neutron::FloatingIP
    # TODO: investigate why we need this depends_on and if we could
    # replace it by router_id with get_resource: router_interface
    depends_on: router_interface
    properties:
      floating_network: { get_param: public_net_name }
      port_id: { get_resource: server1_port }
  server_security_group:
    type: OS::Neutron::SecurityGroup
    properties:
      description: Add security group rules for server
      name: pingtest-security-group
      rules:
        - remote_ip_prefix: 0.0.0.0/0
          protocol: tcp
          port_range_min: 22
          port_range_max: 22
        - remote_ip_prefix: 0.0.0.0/0
          protocol: icmp
  test_flavor:
    type: OS::Nova::Flavor
    properties:
      ram: 512
      vcpus: 1
  gnocchi_res_alarm:
    type: OS::Aodh::GnocchiResourcesAlarm
    properties:
      description: Do stuff with gnocchi
      metric: cpu_util
      aggregation_method: mean
      granularity: 60
      evaluation_periods: 1
      threshold: 50
      alarm_actions: []
      resource_type: instance
      resource_id: { get_resource: server1 }
      comparison_operator: gt
  asg:
    type: OS::Heat::AutoScalingGroup
    properties:
      max_size: 5
      min_size: 1
      resource:
        type: OS::Heat::RandomString
  scaleup_policy:
    type: OS::Heat::ScalingPolicy
    properties:
      adjustment_type: change_in_capacity
      auto_scaling_group_id: {get_resource: asg}
      cooldown: 0
      scaling_adjustment: 1
  alarm:
    type: OS::Aodh::Alarm
    properties:
      description: Scale-up if the average CPU > 50% for 1 minute
      meter_name: test_meter
      statistic: count
      comparison_operator: ge
      threshold: 1
      period: 60
      evaluation_periods: 1
      alarm_actions:
        - {get_attr: [scaleup_policy, alarm_url]}
      matching_metadata:
        metadata.metering.stack_id: {get_param: "OS::stack_id"}
outputs:
  server1_private_ip:
    description: IP address of server1 in private network
    value: { get_attr: [ server1, first_address ] }
  server1_public_ip:
    description: Floating IP address of server1 in public network
    value: { get_attr: [ server1_floating_ip, floating_ip_address ] }
  asg_size:
    value: {get_attr: [asg, current_size]}
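For context, a pingtest template like the one above is exercised by creating a tenant stack, reading the floating IP output, and pinging it. A hedged sketch of that flow using the legacy heat CLI already shown in the tripleo.sh hunk; the template file name and the polling loop are illustrative, not the exact tripleo.sh code:

#!/bin/bash
# Rough sketch of how a pingtest template is driven; error handling omitted.
heat stack-create -f scenario001-multinode-pingtest.yaml tenant-stack

# Wait until the stack settles (simple poll; the real script has its own logic).
until heat stack-show tenant-stack | grep -qE 'CREATE_(COMPLETE|FAILED)'; do
    sleep 10
done

# The server1_public_ip output defined above is the address that gets pinged.
FLOATING_IP=$(heat output-show tenant-stack server1_public_ip | tr -d '"')
ping -c 5 "$FLOATING_IP"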


@@ -1,137 +0,0 @@
heat_template_version: 2013-05-23
description: >
  HOT template to created resources deployed by scenario002.
parameters:
  key_name:
    type: string
    description: Name of keypair to assign to servers
    default: 'pingtest_key'
  image:
    type: string
    description: Name of image to use for servers
    default: 'pingtest_image'
  public_net_name:
    type: string
    default: 'nova'
    description: >
      ID or name of public network for which floating IP addresses will be allocated
  private_net_name:
    type: string
    description: Name of private network to be created
    default: 'default-net'
  private_net_cidr:
    type: string
    description: Private network address (CIDR notation)
    default: '192.168.2.0/24'
  private_net_gateway:
    type: string
    description: Private network gateway address
    default: '192.168.2.1'
  private_net_pool_start:
    type: string
    description: Start of private network IP address allocation pool
    default: '192.168.2.100'
  private_net_pool_end:
    type: string
    default: '192.168.2.200'
    description: End of private network IP address allocation pool
resources:
  key_pair:
    type: OS::Nova::KeyPair
    properties:
      save_private_key: true
      name: {get_param: key_name }
  private_net:
    type: OS::Neutron::Net
    properties:
      name: { get_param: private_net_name }
  private_subnet:
    type: OS::Neutron::Subnet
    properties:
      network_id: { get_resource: private_net }
      cidr: { get_param: private_net_cidr }
      gateway_ip: { get_param: private_net_gateway }
      allocation_pools:
        - start: { get_param: private_net_pool_start }
          end: { get_param: private_net_pool_end }
  router:
    type: OS::Neutron::Router
    properties:
      external_gateway_info:
        network: { get_param: public_net_name }
  router_interface:
    type: OS::Neutron::RouterInterface
    properties:
      router_id: { get_resource: router }
      subnet_id: { get_resource: private_subnet }
  volume1:
    type: OS::Cinder::Volume
    properties:
      name: Volume1
      image: { get_param: image }
      size: 1
  server1:
    type: OS::Nova::Server
    depends_on: volume1
    properties:
      name: Server1
      block_device_mapping:
        - device_name: vda
          volume_id: { get_resource: volume1 }
      flavor: { get_resource: test_flavor }
      key_name: { get_resource: key_pair }
      networks:
        - port: { get_resource: server1_port }
  server1_port:
    type: OS::Neutron::Port
    properties:
      network_id: { get_resource: private_net }
      fixed_ips:
        - subnet_id: { get_resource: private_subnet }
      security_groups: [{ get_resource: server_security_group }]
  server1_floating_ip:
    type: OS::Neutron::FloatingIP
    # TODO: investigate why we need this depends_on and if we could
    # replace it by router_id with get_resource: router_interface
    depends_on: router_interface
    properties:
      floating_network: { get_param: public_net_name }
      port_id: { get_resource: server1_port }
  server_security_group:
    type: OS::Neutron::SecurityGroup
    properties:
      description: Add security group rules for server
      name: pingtest-security-group
      rules:
        - remote_ip_prefix: 0.0.0.0/0
          protocol: tcp
          port_range_min: 22
          port_range_max: 22
        - remote_ip_prefix: 0.0.0.0/0
          protocol: icmp
  test_flavor:
    type: OS::Nova::Flavor
    properties:
      ram: 512
      vcpus: 1
outputs:
  server1_private_ip:
    description: IP address of server1 in private network
    value: { get_attr: [ server1, first_address ] }
  server1_public_ip:
    description: Floating IP address of server1 in public network
    value: { get_attr: [ server1_floating_ip, floating_ip_address ] }
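The scenario002 variant differs from scenario001 mainly in that Server1 boots from a Cinder volume built from the pingtest image, which exercises the Cinder services listed in the scenario002 environment later in this commit. An illustrative way to confirm that behaviour after the stack is up; the credentials file and field names are assumptions about a typical deployment, not part of this change:

#!/bin/bash
# Illustrative check that the scenario002 pingtest really booted from a volume.
source ~/overcloudrc                          # assumption: overcloud credentials file
cinder list                                   # Volume1 (1 GB, from pingtest_image) should be in-use
nova show Server1 | grep -Ei 'volumes_attached|image'   # image field empty, volume attached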


@@ -1,135 +0,0 @@
heat_template_version: 2013-05-23
description: >
  HOT template to created resources deployed by scenario003.
parameters:
  key_name:
    type: string
    description: Name of keypair to assign to servers
    default: 'pingtest_key'
  image:
    type: string
    description: Name of image to use for servers
    default: 'pingtest_image'
  public_net_name:
    type: string
    default: 'nova'
    description: >
      ID or name of public network for which floating IP addresses will be allocated
  private_net_name:
    type: string
    description: Name of private network to be created
    default: 'default-net'
  private_net_cidr:
    type: string
    description: Private network address (CIDR notation)
    default: '192.168.2.0/24'
  private_net_gateway:
    type: string
    description: Private network gateway address
    default: '192.168.2.1'
  private_net_pool_start:
    type: string
    description: Start of private network IP address allocation pool
    default: '192.168.2.100'
  private_net_pool_end:
    type: string
    default: '192.168.2.200'
    description: End of private network IP address allocation pool
resources:
  key_pair:
    type: OS::Nova::KeyPair
    properties:
      save_private_key: true
      name: {get_param: key_name }
  private_net:
    type: OS::Neutron::Net
    properties:
      name: { get_param: private_net_name }
  private_subnet:
    type: OS::Neutron::Subnet
    properties:
      network_id: { get_resource: private_net }
      cidr: { get_param: private_net_cidr }
      gateway_ip: { get_param: private_net_gateway }
      allocation_pools:
        - start: { get_param: private_net_pool_start }
          end: { get_param: private_net_pool_end }
  router:
    type: OS::Neutron::Router
    properties:
      external_gateway_info:
        network: { get_param: public_net_name }
  router_interface:
    type: OS::Neutron::RouterInterface
    properties:
      router_id: { get_resource: router }
      subnet_id: { get_resource: private_subnet }
  server1:
    type: OS::Nova::Server
    properties:
      name: Server1
      flavor: { get_resource: test_flavor }
      image: { get_param: image }
      key_name: { get_resource: key_pair }
      networks:
        - port: { get_resource: server1_port }
  server1_port:
    type: OS::Neutron::Port
    properties:
      network_id: { get_resource: private_net }
      fixed_ips:
        - subnet_id: { get_resource: private_subnet }
      security_groups: [{ get_resource: server_security_group }]
  server1_floating_ip:
    type: OS::Neutron::FloatingIP
    # TODO: investigate why we need this depends_on and if we could
    # replace it by router_id with get_resource: router_interface
    depends_on: router_interface
    properties:
      floating_network: { get_param: public_net_name }
      port_id: { get_resource: server1_port }
  server_security_group:
    type: OS::Neutron::SecurityGroup
    properties:
      description: Add security group rules for server
      name: pingtest-security-group
      rules:
        - remote_ip_prefix: 0.0.0.0/0
          protocol: tcp
          port_range_min: 22
          port_range_max: 22
        - remote_ip_prefix: 0.0.0.0/0
          protocol: icmp
  test_flavor:
    type: OS::Nova::Flavor
    properties:
      ram: 512
      vcpus: 1
  sahara-image:
    type: OS::Sahara::ImageRegistry
    properties:
      image: { get_param: image }
      username: cirros
      tags:
        - tripleo
outputs:
  server1_private_ip:
    description: IP address of server1 in private network
    value: { get_attr: [ server1, first_address ] }
  server1_public_ip:
    description: Floating IP address of server1 in public network
    value: { get_attr: [ server1_floating_ip, floating_ip_address ] }
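Scenario003 adds an OS::Sahara::ImageRegistry resource, so this pingtest only passes when the Sahara API enabled in the scenario003 environment below can register the pingtest image. A simple, illustrative way to see that from the stack itself, reusing the tenant-stack name from the tripleo.sh hunk:

#!/bin/bash
# Illustrative: confirm the Sahara image registration resource converged.
heat resource-list tenant-stack | grep sahara-image
# Expect a CREATE_COMPLETE row for the OS::Sahara::ImageRegistry resource.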


@@ -1,56 +0,0 @@
resource_registry:
  OS::TripleO::Controller::Net::SoftwareConfig: ../heat-templates/net-config-multinode.yaml
  OS::TripleO::Compute::Net::SoftwareConfig: ../heat-templates/net-config-multinode.yaml
parameter_defaults:
  ControllerServices:
    - OS::TripleO::Services::Kernel
    - OS::TripleO::Services::Keystone
    - OS::TripleO::Services::GlanceApi
    - OS::TripleO::Services::GlanceRegistry
    - OS::TripleO::Services::HeatApi
    - OS::TripleO::Services::HeatApiCfn
    - OS::TripleO::Services::HeatApiCloudwatch
    - OS::TripleO::Services::HeatEngine
    - OS::TripleO::Services::MySQL
    - OS::TripleO::Services::NeutronDhcpAgent
    - OS::TripleO::Services::NeutronL3Agent
    - OS::TripleO::Services::NeutronMetadataAgent
    - OS::TripleO::Services::NeutronServer
    - OS::TripleO::Services::NeutronCorePlugin
    - OS::TripleO::Services::NeutronOvsAgent
    - OS::TripleO::Services::RabbitMQ
    - OS::TripleO::Services::HAproxy
    - OS::TripleO::Services::Keepalived
    - OS::TripleO::Services::Memcached
    - OS::TripleO::Services::Pacemaker
    - OS::TripleO::Services::NovaConductor
    - OS::TripleO::Services::NovaApi
    - OS::TripleO::Services::NovaMetadata
    - OS::TripleO::Services::NovaScheduler
    - OS::TripleO::Services::Ntp
    - OS::TripleO::Services::Snmp
    - OS::TripleO::Services::Timezone
    - OS::TripleO::Services::NovaCompute
    - OS::TripleO::Services::NovaLibvirt
    - OS::TripleO::Services::MongoDb
    - OS::TripleO::Services::Redis
    - OS::TripleO::Services::AodhApi
    - OS::TripleO::Services::AodhEvaluator
    - OS::TripleO::Services::AodhNotifier
    - OS::TripleO::Services::AodhListener
    - OS::TripleO::Services::CeilometerApi
    - OS::TripleO::Services::CeilometerCollector
    - OS::TripleO::Services::CeilometerExpirer
    - OS::TripleO::Services::CeilometerAgentCentral
    - OS::TripleO::Services::CeilometerAgentNotification
    - OS::TripleO::Services::GnocchiApi
    - OS::TripleO::Services::GnocchiMetricd
    - OS::TripleO::Services::GnocchiStatsd
  ControllerExtraConfig:
    nova::compute::libvirt::services::libvirt_virt_type: qemu
    nova::compute::libvirt::libvirt_virt_type: qemu
  Debug: true
  # we don't deploy Swift so we switch to file backend.
  GlanceBackend: 'file'
  GnocchiBackend: 'file'
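Environments like this one are consumed by passing them to the deploy command with -e, exactly as the OVERCLOUD_DEPLOY_ARGS hunk at the end of this commit does via $MULTINODE_ENV_PATH. An illustrative invocation against the new THT location; the file name assumes a scenario001-multinode job, and the real command line is assembled by tripleo.sh:

#!/bin/bash
# Illustrative deploy call showing where the scenario environment now plugs in.
openstack overcloud deploy --templates \
  -e /usr/share/openstack-tripleo-heat-templates/environments/deployed-server-environment.yaml \
  -e /usr/share/openstack-tripleo-heat-templates/ci/environments/scenario001-multinode.yaml \
  --compute-scale 0 \
  --libvirt-type qemu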


@@ -1,46 +0,0 @@
resource_registry:
  OS::TripleO::Controller::Net::SoftwareConfig: ../heat-templates/net-config-multinode.yaml
  OS::TripleO::Compute::Net::SoftwareConfig: ../heat-templates/net-config-multinode.yaml
parameter_defaults:
  ControllerServices:
    - OS::TripleO::Services::Kernel
    - OS::TripleO::Services::Keystone
    - OS::TripleO::Services::GlanceApi
    - OS::TripleO::Services::GlanceRegistry
    - OS::TripleO::Services::HeatApi
    - OS::TripleO::Services::HeatApiCfn
    - OS::TripleO::Services::HeatApiCloudwatch
    - OS::TripleO::Services::HeatEngine
    - OS::TripleO::Services::MySQL
    - OS::TripleO::Services::NeutronDhcpAgent
    - OS::TripleO::Services::NeutronL3Agent
    - OS::TripleO::Services::NeutronMetadataAgent
    - OS::TripleO::Services::NeutronServer
    - OS::TripleO::Services::NeutronCorePlugin
    - OS::TripleO::Services::NeutronOvsAgent
    - OS::TripleO::Services::RabbitMQ
    - OS::TripleO::Services::HAproxy
    - OS::TripleO::Services::Keepalived
    - OS::TripleO::Services::Memcached
    - OS::TripleO::Services::Pacemaker
    - OS::TripleO::Services::NovaConductor
    - OS::TripleO::Services::NovaApi
    - OS::TripleO::Services::NovaMetadata
    - OS::TripleO::Services::NovaScheduler
    - OS::TripleO::Services::Ntp
    - OS::TripleO::Services::Snmp
    - OS::TripleO::Services::Timezone
    - OS::TripleO::Services::NovaCompute
    - OS::TripleO::Services::NovaLibvirt
    - OS::TripleO::Services::CinderApi
    - OS::TripleO::Services::CinderBackup
    - OS::TripleO::Services::CinderScheduler
    - OS::TripleO::Services::CinderVolume
    - OS::TripleO::Services::SwiftProxy
    - OS::TripleO::Services::SwiftStorage
    - OS::TripleO::Services::SwiftRingBuilder
  ControllerExtraConfig:
    nova::compute::libvirt::services::libvirt_virt_type: qemu
    nova::compute::libvirt::libvirt_virt_type: qemu
  Debug: true
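Relative to scenario001, this environment swaps the telemetry stack for the Cinder and Swift services, which is what lets the boot-from-volume pingtest above work. An illustrative post-deploy smoke check for those services; the credentials file is an assumption, not part of this change:

#!/bin/bash
# Illustrative checks for the services this environment enables.
source ~/overcloudrc                 # assumption: overcloud credentials file
openstack volume service list        # cinder-scheduler / cinder-volume should be up
swift stat                           # Swift proxy answers at the account level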


@@ -1,45 +0,0 @@
resource_registry:
  OS::TripleO::Controller::Net::SoftwareConfig: ../heat-templates/net-config-multinode.yaml
  OS::TripleO::Compute::Net::SoftwareConfig: ../heat-templates/net-config-multinode.yaml
  OS::TripleO::Services::SaharaApi: /usr/share/openstack-tripleo-heat-templates/puppet/services/sahara-api.yaml
  OS::TripleO::Services::SaharaEngine: /usr/share/openstack-tripleo-heat-templates/puppet/services/sahara-engine.yaml
parameter_defaults:
  ControllerServices:
    - OS::TripleO::Services::Kernel
    - OS::TripleO::Services::Keystone
    - OS::TripleO::Services::GlanceApi
    - OS::TripleO::Services::GlanceRegistry
    - OS::TripleO::Services::HeatApi
    - OS::TripleO::Services::HeatApiCfn
    - OS::TripleO::Services::HeatApiCloudwatch
    - OS::TripleO::Services::HeatEngine
    - OS::TripleO::Services::MySQL
    - OS::TripleO::Services::NeutronDhcpAgent
    - OS::TripleO::Services::NeutronL3Agent
    - OS::TripleO::Services::NeutronMetadataAgent
    - OS::TripleO::Services::NeutronServer
    - OS::TripleO::Services::NeutronCorePlugin
    - OS::TripleO::Services::NeutronOvsAgent
    - OS::TripleO::Services::RabbitMQ
    - OS::TripleO::Services::HAproxy
    - OS::TripleO::Services::Keepalived
    - OS::TripleO::Services::Memcached
    - OS::TripleO::Services::Pacemaker
    - OS::TripleO::Services::NovaConductor
    - OS::TripleO::Services::NovaApi
    - OS::TripleO::Services::NovaMetadata
    - OS::TripleO::Services::NovaScheduler
    - OS::TripleO::Services::Ntp
    - OS::TripleO::Services::Snmp
    - OS::TripleO::Services::Timezone
    - OS::TripleO::Services::NovaCompute
    - OS::TripleO::Services::NovaLibvirt
    - OS::TripleO::Services::SaharaApi
    - OS::TripleO::Services::SaharaEngine
  ControllerExtraConfig:
    nova::compute::libvirt::services::libvirt_virt_type: qemu
    nova::compute::libvirt::libvirt_virt_type: qemu
  Debug: true
  # we don't deploy Swift so we switch to file backend.
  GlanceBackend: 'file'
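The two extra resource_registry entries wire the Sahara service interfaces to their puppet implementations in THT. A quick, illustrative sanity check that the override targets actually exist on the node (paths taken verbatim from the file above):

#!/bin/bash
# Illustrative: the registry override only works if these service templates exist.
ls /usr/share/openstack-tripleo-heat-templates/puppet/services/sahara-api.yaml \
   /usr/share/openstack-tripleo-heat-templates/puppet/services/sahara-engine.yaml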


@@ -86,11 +86,15 @@ export TEST_OVERCLOUD_DELETE=0
 export OOOQ=0
 if [[ $TOCI_JOBTYPE =~ scenario ]]; then
-    export PINGTEST_TEMPLATE=${PINGTEST_TEMPLATE:-"${TOCI_JOBTYPE}-pingtest"}
-    MULTINODE_ENV_NAME=$TOCI_JOBTYPE
+    # note: we don't need PINGTEST_TEMPLATE here. See tripleo.sh. Though
+    # we need to export it for logs purpose.
+    export PINGTEST_TEMPLATE=
+    export MULTINODE_ENV_NAME=$TOCI_JOBTYPE
+    MULTINODE_ENV_PATH=/usr/share/openstack-tripleo-heat-templates/ci/environments/$MULTINODE_ENV_NAME.yaml
 else
     export PINGTEST_TEMPLATE=${PINGTEST_TEMPLATE:-"tenantvm_floatingip"}
-    MULTINODE_ENV_NAME='multinode'
+    export MULTINODE_ENV_NAME='multinode'
+    MULTINODE_ENV_PATH=$TRIPLEO_ROOT/tripleo-ci/test-environments/$MULTINODE_ENV_NAME.yaml
 fi
 if [[ "$TOCI_JOBTYPE" =~ "periodic" && "$TOCI_JOBTYPE" =~ "-ha" ]]; then
     TEST_OVERCLOUD_DELETE=1
@@ -183,7 +187,7 @@ for JOB_TYPE_PART in $(sed 's/-/ /g' <<< "${TOCI_JOBTYPE:-}") ; do
         UNDERCLOUD_SSL=0
         INTROSPECT=0
         OVERCLOUD_DEPLOY_ARGS="--libvirt-type=qemu -t $OVERCLOUD_DEPLOY_TIMEOUT"
-        OVERCLOUD_DEPLOY_ARGS="$OVERCLOUD_DEPLOY_ARGS -e /usr/share/openstack-tripleo-heat-templates/environments/deployed-server-environment.yaml -e $TRIPLEO_ROOT/tripleo-ci/test-environments/$MULTINODE_ENV_NAME.yaml --compute-scale 0 --overcloud-ssh-user $OVERCLOUD_SSH_USER --validation-errors-nonfatal"
+        OVERCLOUD_DEPLOY_ARGS="$OVERCLOUD_DEPLOY_ARGS -e /usr/share/openstack-tripleo-heat-templates/environments/deployed-server-environment.yaml -e $MULTINODE_ENV_PATH --compute-scale 0 --overcloud-ssh-user $OVERCLOUD_SSH_USER --validation-errors-nonfatal"
     ;;
     undercloud)
         TOCIRUNNER="./toci_instack_osinfra.sh"
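Taken together, the toci_gate_test.sh change computes the environment path once per job type and reuses it when assembling the deploy arguments. A condensed sketch of that plumbing; the job type value is an example, not one fixed by this commit:

#!/bin/bash
# Condensed view of the new variable plumbing in toci_gate_test.sh (illustrative values).
TOCI_JOBTYPE=scenario001-multinode        # example job type
TRIPLEO_ROOT=${TRIPLEO_ROOT:-/opt/stack}  # assumption for the non-scenario branch
if [[ $TOCI_JOBTYPE =~ scenario ]]; then
    MULTINODE_ENV_PATH=/usr/share/openstack-tripleo-heat-templates/ci/environments/$TOCI_JOBTYPE.yaml
else
    MULTINODE_ENV_PATH=$TRIPLEO_ROOT/tripleo-ci/test-environments/multinode.yaml
fi
# Later, the multinode case appends it to the deploy arguments:
OVERCLOUD_DEPLOY_ARGS="$OVERCLOUD_DEPLOY_ARGS -e $MULTINODE_ENV_PATH"
echo "$OVERCLOUD_DEPLOY_ARGS"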