Merge Neutron AutoScaling and LoadBalancer tests

The tested use case is an autoscaling group of web app servers
behind a loadbalancer.

The test template was rewritten to use AutoScalingGroup, wait conditions
and outputs, so no client other than the Heat one is used.
There is no longer a need to check for VM connectivity, as a COMPLETE
stack/resource status now quite reliably means that the listening
servers are running.

This patch should also help narrow down the causes of bug 1437203,
since it does not check instance connectivity over SSH.

Change-Id: Iec8e8061f9ab3f3841fa221722b7b7805760cdf7
Related-Bug: #1437203
Closes-Bug: #1435285
Pavlo Shchelokovskyy 2015-03-19 17:54:52 +00:00
parent f8f4307f66
commit 1bc886c89d
8 changed files with 327 additions and 369 deletions


@@ -0,0 +1,65 @@
heat_template_version: 2015-10-15

description: |
  App server that is a member of Neutron Pool.

parameters:

  image:
    type: string
  flavor:
    type: string
  net:
    type: string
  sec_group:
    type: string
  pool_id:
    type: string
  app_port:
    type: number
  timeout:
    type: number

resources:

  config:
    type: OS::Test::WebAppConfig
    properties:
      app_port: { get_param: app_port }
      wc_curl_cli: { get_attr: [ handle, curl_cli ] }

  server:
    type: OS::Nova::Server
    properties:
      image: { get_param: image }
      flavor: { get_param: flavor }
      networks:
        - network: { get_param: net }
      security_groups:
        - { get_param: sec_group }
      user_data_format: RAW
      user_data: { get_resource: config }

  handle:
    type: OS::Heat::WaitConditionHandle

  waiter:
    type: OS::Heat::WaitCondition
    depends_on: server
    properties:
      timeout: { get_param: timeout }
      handle: { get_resource: handle }

  pool_member:
    type: OS::Neutron::PoolMember
    depends_on: waiter
    properties:
      address: { get_attr: [ server, networks, { get_param: net }, 0 ] }
      pool_id: { get_param: pool_id }
      protocol_port: { get_param: app_port }


@@ -0,0 +1,35 @@
heat_template_version: 2015-10-15

description: |
  Simplest web-app using netcat reporting only hostname.
  Specifically tailored for minimal Cirros image.

parameters:

  app_port:
    type: number
  wc_curl_cli:
    type: string

resources:

  webapp_nc:
    type: OS::Heat::SoftwareConfig
    properties:
      group: ungrouped
      config:
        str_replace:
          template: |
            #! /bin/sh -v
            Body=$(hostname)
            Response="HTTP/1.1 200 OK\r\nContent-Length: ${#Body}\r\n\r\n$Body"
            wc_notify --data-binary '{"status": "SUCCESS"}'
            while true ; do echo -e $Response | nc -llp PORT; done
          params:
            PORT: { get_param: app_port }
            wc_notify: { get_param: wc_curl_cli }

outputs:

  OS::stack_id:
    value: { get_resource: webapp_nc }
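
Because every group member answers each HTTP request with nothing but its own
hostname, the number of distinct response bodies seen through the load-balanced
address equals the number of healthy pool members. A minimal standalone sketch
of that check, essentially what check_num_responses in the test below does
(the URL is purely illustrative):

import time

import requests


def distinct_bodies(url, attempts=10):
    """Poll url and return the set of distinct bodies from 200 responses."""
    seen = set()
    for _ in range(attempts):
        time.sleep(1)
        r = requests.get(url)
        if r.status_code == 200:
            # each backend replies with its own hostname
            seen.add(r.text)
    return seen

# e.g. len(distinct_bodies('http://192.0.2.10:80')) should track the current
# size of the autoscaling group.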


@@ -0,0 +1,113 @@
heat_template_version: 2015-04-30

description: |
  Template which tests Neutron load balancing requests to members of
  Heat AutoScalingGroup.
  Instances must be running some webserver on a given app_port
  producing HTTP response that is different between servers
  but stable over time for given server.

parameters:

  flavor:
    type: string
  image:
    type: string
  net:
    type: string
  subnet:
    type: string
  public_net:
    type: string
  app_port:
    type: number
    default: 8080
  lb_port:
    type: number
    default: 80
  timeout:
    type: number
    default: 600

resources:

  sec_group:
    type: OS::Neutron::SecurityGroup
    properties:
      rules:
        - remote_ip_prefix: 0.0.0.0/0
          protocol: tcp
          port_range_min: { get_param: app_port }
          port_range_max: { get_param: app_port }

  asg:
    type: OS::Heat::AutoScalingGroup
    properties:
      desired_capacity: 1
      max_size: 2
      min_size: 1
      resource:
        type: OS::Test::NeutronAppServer
        properties:
          image: { get_param: image }
          flavor: { get_param: flavor }
          net: { get_param: net }
          sec_group: { get_resource: sec_group }
          app_port: { get_param: app_port }
          pool_id: { get_resource: pool }
          timeout: { get_param: timeout }

  scale_up:
    type: OS::Heat::ScalingPolicy
    properties:
      adjustment_type: change_in_capacity
      auto_scaling_group_id: { get_resource: asg }
      scaling_adjustment: 1

  scale_down:
    type: OS::Heat::ScalingPolicy
    properties:
      adjustment_type: change_in_capacity
      auto_scaling_group_id: { get_resource: asg }
      scaling_adjustment: -1

  health_monitor:
    type: OS::Neutron::HealthMonitor
    properties:
      delay: 3
      type: HTTP
      timeout: 3
      max_retries: 3

  pool:
    type: OS::Neutron::Pool
    properties:
      lb_method: ROUND_ROBIN
      protocol: HTTP
      subnet: { get_param: subnet }
      monitors:
        - { get_resource: health_monitor }
      vip:
        protocol_port: { get_param: lb_port }

  floating_ip:
    type: OS::Neutron::FloatingIP
    properties:
      floating_network: { get_param: public_net }
      port_id: { get_attr: [ pool, vip, 'port_id' ] }

  loadbalancer:
    type: OS::Neutron::LoadBalancer
    properties:
      pool_id: { get_resource: pool }
      protocol_port: { get_param: app_port }

outputs:

  lburl:
    description: URL of the loadbalanced app
    value:
      str_replace:
        template: http://IP_ADDRESS:PORT
        params:
          IP_ADDRESS: { get_attr: [ floating_ip, floating_ip_address ] }
          PORT: { get_param: lb_port }
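
The OS::Test::NeutronAppServer and OS::Test::WebAppConfig types referenced
above are not built-in Heat resources; they exist only through a
resource_registry mapping, which the test below constructs in memory. Outside
the test harness, the same stack could be launched roughly as follows with
python-heatclient; this is a sketch in which heat_client (an already
authenticated heatclient.v1.client.Client), the local file names and the
parameter values are all assumptions:

# Sketch: file names and parameter values are illustrative only.
with open('test_autoscaling_lb_neutron.yaml') as f:
    template = f.read()
with open('app_server_neutron.yaml') as f:
    app_server = f.read()
with open('netcat-webapp.yaml') as f:
    webapp = f.read()

files = {'appserver.yaml': app_server, 'webapp.yaml': webapp}
environment = {'resource_registry': {
    'OS::Test::NeutronAppServer': 'appserver.yaml',
    'OS::Test::WebAppConfig': 'webapp.yaml'}}

heat_client.stacks.create(
    stack_name='asg-lb-demo',
    template=template,
    files=files,
    environment=environment,
    parameters={'flavor': 'm1.tiny', 'image': 'cirros-0.3.4',
                'net': 'private', 'subnet': 'private-subnet',
                'public_net': 'public'})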


@@ -1,56 +0,0 @@
heat_template_version: 2014-10-16

description: Auto-scaling Test

parameters:
  image_id:
    type: string
    label: Image ID
    description: Image ID from configurations
  capacity:
    type: string
    label: Capacity
    description: Auto-scaling group desired capacity
  fixed_subnet:
    type: string
    label: fixed subnetwork ID
    description: subnetwork ID used for autoscaling
  instance_type:
    type: string
    label: instance_type
    description: type of instance to launch

resources:
  test_pool:
    type: OS::Neutron::Pool
    properties:
      description: Test Pool
      lb_method: ROUND_ROBIN
      name: test_pool
      protocol: HTTP
      subnet: { get_param: fixed_subnet }
      vip: {
        "description": "Test VIP",
        "protocol_port": 80,
        "name": "test_vip"
      }
  load_balancer:
    type: OS::Neutron::LoadBalancer
    properties:
      protocol_port: 80
      pool_id: { get_resource: test_pool }
  launch_config:
    type: AWS::AutoScaling::LaunchConfiguration
    properties:
      ImageId: { get_param: image_id }
      InstanceType: { get_param: instance_type }
  server_group:
    type: AWS::AutoScaling::AutoScalingGroup
    properties:
      AvailabilityZones : ["nova"]
      LaunchConfigurationName : { get_resource : launch_config }
      VPCZoneIdentifier: [{ get_param: fixed_subnet }]
      MinSize : 1
      MaxSize : 5
      DesiredCapacity: { get_param: capacity }
      LoadBalancerNames : [ { get_resource : load_balancer } ]


@@ -1,133 +0,0 @@
heat_template_version: 2014-10-16

description: |
  Template which tests neutron load balancing resources

parameters:
  key_name:
    type: string
  flavor:
    type: string
  image:
    type: string
  network:
    type: string
  private_subnet_id:
    type: string
  external_network_id:
    type: string
  port:
    type: string
    default: '80'
  timeout:
    type: number

resources:
  sec_group:
    type: OS::Neutron::SecurityGroup
    properties:
      description: Add security group rules for servers
      name: security-group
      rules:
        - remote_ip_prefix: 0.0.0.0/0
          protocol: tcp
          port_range_min: { get_param: port }
          port_range_max: { get_param: port }
        - remote_ip_prefix: 0.0.0.0/0
          protocol: icmp

  wait_condition:
    type: OS::Heat::WaitCondition
    properties:
      handle: { get_resource: wait_condition_handle }
      count: 2
      timeout: { get_param: timeout }

  wait_condition_handle:
    type: OS::Heat::WaitConditionHandle

  config:
    type: OS::Heat::SoftwareConfig
    properties:
      group: ungrouped
      config:
        str_replace:
          template: |
            #!/bin/bash -v
            echo $(hostname) > index.html
            python -m SimpleHTTPServer port &
            wc_notify --data-binary '{"status": "SUCCESS"}'
          params:
            wc_notify: { get_attr: ['wait_condition_handle', 'curl_cli'] }
            port: { get_param: port }

  server1:
    type: OS::Nova::Server
    properties:
      name: Server1
      image: { get_param: image }
      flavor: { get_param: flavor }
      key_name: { get_param: key_name }
      networks: [{network: {get_param: network} }]
      security_groups: [{ get_resource: sec_group }]
      user_data_format: SOFTWARE_CONFIG
      user_data: { get_resource: config }

  server2:
    type: OS::Nova::Server
    properties:
      name: Server2
      image: { get_param: image }
      flavor: { get_param: flavor }
      key_name: { get_param: key_name }
      networks: [{network: {get_param: network} }]
      security_groups: [{ get_resource: sec_group }]
      user_data_format: SOFTWARE_CONFIG
      user_data: { get_resource: config }

  health_monitor:
    type: OS::Neutron::HealthMonitor
    properties:
      delay: 3
      type: HTTP
      timeout: 3
      max_retries: 3

  test_pool:
    type: OS::Neutron::Pool
    properties:
      lb_method: ROUND_ROBIN
      protocol: HTTP
      subnet: { get_param: private_subnet_id }
      monitors:
        - { get_resource: health_monitor }
      vip:
        protocol_port: { get_param: port }

  floating_ip:
    type: OS::Neutron::FloatingIP
    properties:
      floating_network: { get_param: external_network_id }
      port_id: { get_attr: [test_pool, vip, 'port_id'] }
      fixed_ip_address: { get_attr: [test_pool, vip, 'address'] }

  LBaaS:
    type: OS::Neutron::LoadBalancer
    depends_on: wait_condition
    properties:
      pool_id: { get_resource: test_pool }
      protocol_port: { get_param: port }
      members:
        - { get_resource: server1 }

outputs:
  serv1_ip:
    value: {get_attr: [server1, networks, { get_param: network }, 0]}
  serv2_ip:
    value: {get_attr: [server2, networks, { get_param: network }, 0]}
  vip:
    value: {get_attr: [test_pool, vip, address]}
  fip:
    value: {get_attr: [floating_ip, floating_ip_address]}


@@ -0,0 +1,114 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time

import requests

from heat_integrationtests.common import test
from heat_integrationtests.scenario import scenario_base


class AutoscalingLoadBalancerTest(scenario_base.ScenarioTestsBase):
    """
    The class is responsible for testing the ASG + LB scenario.

    The very common use case tested is an autoscaling group
    of some web application servers behind a loadbalancer.
    """

    def setUp(self):
        super(AutoscalingLoadBalancerTest, self).setUp()
        self.template_name = 'test_autoscaling_lb_neutron.yaml'
        self.app_server_template_name = 'app_server_neutron.yaml'
        self.webapp_template_name = 'netcat-webapp.yaml'

    def check_num_responses(self, url, expected_num, retries=10):
        resp = set()
        for count in range(retries):
            time.sleep(1)
            r = requests.get(url)
            # skip unsuccessful requests
            if r.status_code == 200:
                resp.add(r.text)
        self.assertEqual(expected_num, len(resp))

    def autoscale_complete(self, stack_id, expected_num):
        res_list = self.client.resources.list(stack_id)
        all_res_complete = all(res.resource_status in ('UPDATE_COMPLETE',
                                                       'CREATE_COMPLETE')
                               for res in res_list)
        all_res = len(res_list) == expected_num
        return all_res and all_res_complete

    def test_autoscaling_loadbalancer_neutron(self):
        """
        Check work of AutoScaling and Neutron LBaaS resources in Heat.

        The scenario is the following:
            1. Launch a stack with a load balancer and autoscaling group
               of one server, wait until stack create is complete.
            2. Check that there is only one distinctive response from
               loadbalanced IP.
            3. Signal the scale_up policy, wait until all resources in
               autoscaling group are complete.
            4. Check that now there are two distinctive responses from
               loadbalanced IP.
        """
        parameters = {
            'flavor': self.conf.minimal_instance_type,
            'image': self.conf.minimal_image_ref,
            'net': self.conf.fixed_network_name,
            'subnet': self.conf.fixed_subnet_name,
            'public_net': self.conf.floating_network_name,
            'app_port': 8080,
            'lb_port': 80,
            'timeout': 600
        }
        app_server_template = self._load_template(
            __file__, self.app_server_template_name, self.sub_dir
        )
        webapp_template = self._load_template(
            __file__, self.webapp_template_name, self.sub_dir
        )
        files = {'appserver.yaml': app_server_template,
                 'webapp.yaml': webapp_template}
        env = {'resource_registry':
               {'OS::Test::NeutronAppServer': 'appserver.yaml',
                'OS::Test::WebAppConfig': 'webapp.yaml'}}
        # Launch stack
        sid = self.launch_stack(
            template_name=self.template_name,
            parameters=parameters,
            files=files,
            environment=env
        )
        stack = self.client.stacks.get(sid)
        lb_url = self._stack_output(stack, 'lburl')
        # Check number of distinctive responses, must be 1
        self.check_num_responses(lb_url, 1)
        # Signal the scaling hook
        self.client.resources.signal(sid, 'scale_up')
        # Wait for AutoScalingGroup update to finish
        asg = self.client.resources.get(sid, 'asg')
        test.call_until_true(self.conf.build_timeout,
                             self.conf.build_interval,
                             self.autoscale_complete,
                             asg.physical_resource_id, 2)
        # Check number of distinctive responses, must now be 2
        self.check_num_responses(lb_url, 2)
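
The template also defines a scale_down policy that this test never signals. A
symmetric check could reuse the same helpers; a sketch of such a continuation
(not part of this patch), written as if appended to the end of the test method
above:

        # Hypothetical continuation: scale back in and expect a single
        # distinct response from the load balancer again.
        self.client.resources.signal(sid, 'scale_down')
        test.call_until_true(self.conf.build_timeout,
                             self.conf.build_interval,
                             self.autoscale_complete,
                             asg.physical_resource_id, 1)
        self.check_num_responses(lb_url, 1)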


@@ -1,72 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from heat_integrationtests.scenario import scenario_base


class NeutronAutoscalingTest(scenario_base.ScenarioTestsBase):
    """
    The class is responsible for testing of neutron resources autoscaling.
    """

    def setUp(self):
        super(NeutronAutoscalingTest, self).setUp()
        if not self.conf.fixed_subnet_name:
            raise self.skipException("No sub-network configured to test")
        self.template_name = 'test_neutron_autoscaling.yaml'

    def test_neutron_autoscaling(self):
        """
        Check autoscaling of load balancer members in Heat.

        The alternative scenario is the following:
            1. Launch a stack with a load balancer.
            2. Check that the load balancer created
               one load balancer member for stack.
            3. Update stack definition: increase desired capacity of stack.
            4. Check that number of members in load balancer was increased.
        """
        parameters = {
            "image_id": self.conf.minimal_image_ref,
            "capacity": "1",
            "instance_type": self.conf.minimal_instance_type,
            "fixed_subnet": self.net['subnets'][0],
        }

        # Launch stack
        stack_id = self.launch_stack(
            template_name=self.template_name,
            parameters=parameters
        )

        # Check number of members
        pool_resource = self.client.resources.get(stack_id, 'test_pool')
        pool_members = self.network_client.list_members(
            pool_id=pool_resource.physical_resource_id)['members']
        self.assertEqual(1, len(pool_members))

        # Increase desired capacity and update the stack
        template = self._load_template(
            __file__, self.template_name, self.sub_dir
        )
        parameters["capacity"] = "2"
        self.update_stack(
            stack_id,
            template=template,
            parameters=parameters
        )

        # Check number of members
        pool_members = self.network_client.list_members(
            pool_id=pool_resource.physical_resource_id)['members']
        self.assertEqual(2, len(pool_members))


@@ -1,108 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time

from six.moves import urllib

from heat_integrationtests.scenario import scenario_base


class NeutronLoadBalancerTest(scenario_base.ScenarioTestsBase):
    """
    The class is responsible for testing of neutron resources balancer.
    """

    def setUp(self):
        super(NeutronLoadBalancerTest, self).setUp()
        self.public_net = self._get_network(self.conf.floating_network_name)
        self.template_name = 'test_neutron_loadbalancer.yaml'

    def collect_responses(self, ip, expected_resp):
        resp = set()
        for count in range(10):
            time.sleep(1)
            resp.add(urllib.request.urlopen('http://%s/' % ip).read())

        self.assertEqual(expected_resp, resp)

    def test_neutron_loadbalancer(self):
        """
        Check work of Neutron LBaaS resource in Heat.

        The alternative scenario is the following:
            1. Launch a stack with a load balancer, two servers,
               but use only one as a LB member.
            2. Check connection to the servers and LB.
            3. Collect info about responses, which were received by LB from
               its members (responses have to be received only from 'server1').
            4. Update stack definition: include 'server2' into LBaaS.
            5. Check that number of members in LB was increased and
               responses were received from 'server1' and 'server2'.
        """
        parameters = {
            'key_name': self.keypair_name,
            'flavor': self.conf.minimal_instance_type,
            'image': self.conf.image_ref,
            'network': self.net['name'],
            'private_subnet_id': self.net['subnets'][0],
            'external_network_id': self.public_net['id'],
            'timeout': self.conf.build_timeout
        }

        # Launch stack
        sid = self.launch_stack(
            template_name=self.template_name,
            parameters=parameters
        )

        stack = self.client.stacks.get(sid)
        floating_ip = self._stack_output(stack, 'fip')
        vip = self._stack_output(stack, 'vip')
        server1_ip = self._stack_output(stack, 'serv1_ip')
        server2_ip = self._stack_output(stack, 'serv2_ip')

        # Check connection and info about received responses
        self.check_connectivity(server1_ip)
        self.collect_responses(server1_ip, {'server1\n'})

        self.check_connectivity(server2_ip)
        self.collect_responses(server2_ip, {'server2\n'})

        self.check_connectivity(vip)
        self.collect_responses(vip, {'server1\n'})

        self.check_connectivity(floating_ip)
        self.collect_responses(floating_ip, {'server1\n'})

        # Include 'server2' to LB and update the stack
        template = self._load_template(
            __file__, self.template_name, self.sub_dir
        )
        template = template.replace(
            '- { get_resource: server1 }',
            '- { get_resource: server1 }\n'
            '        - { get_resource: server2 }\n'
        )
        self.update_stack(
            sid,
            template=template,
            parameters=parameters
        )

        self.check_connectivity(vip)
        self.collect_responses(vip, {'server1\n', 'server2\n'})

        self.check_connectivity(floating_ip)
        self.collect_responses(floating_ip, {'server1\n', 'server2\n'})