Add test with detached haproxy role

- Add test with external load balancer. Controllers are
from different racks and haproxy is from rack-3
- Separate devops config with appropriate networks
assigned to nodes is used
- Local repos for cluster are used because public networks
are routed without internet connection
- OSTF isn't run because it is not intended to be used
with a separate haproxy

Closes-Bug: #1583530

Change-Id: I0d3647c8eb13159c27e64ddf5925467f451b610c
This commit is contained in:
asledzinskiy 2016-04-12 18:13:16 +03:00
parent 1174437a0f
commit 7a5970fa64
6 changed files with 684 additions and 0 deletions

View File

@ -680,6 +680,11 @@ Test for separate keystone service and ceph
.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_keystone_ceph
:members:
Test for separate haproxy service
---------------------------------
.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_haproxy
:members:
Test for separate horizon service
---------------------------------
.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_horizon

View File

@ -132,6 +132,11 @@ MULTIPLE_NETWORKS_TEMPLATE = os.environ.get(
'MULTIPLE_NETWORKS_TEMPLATE',
os.path.join(os.getcwd(),
'system_test/tests_templates/tests_configs/multirack.yaml'))
# Devops template for the multirack environment with an external
# haproxy node; overridable via the EXTERNAL_HAPROXY_TEMPLATE env var.
EXTERNAL_HAPROXY_TEMPLATE = os.environ.get(
    'EXTERNAL_HAPROXY_TEMPLATE',
    os.path.join(
        os.getcwd(),
        'system_test/tests_templates/tests_configs/external_haproxy.yaml'))
if MULTIPLE_NETWORKS:
NODEGROUPS = (
@ -483,6 +488,10 @@ SEPARATE_SERVICE_HORIZON_PLUGIN_PATH = os.environ.get(
# URL of the etckeeper Fuel plugin git repository; overridable via env.
ETCKEEPER_PLUGIN_REPO = os.environ.get(
    'ETCKEEPER_PLUGIN_REPO',
    'https://github.com/Mirantis/fuel-plugin-etckeeper')
# Local filesystem paths to the detach-haproxy and external load
# balancer plugin tarballs.  No defaults are provided, so these are
# None unless set in the environment; tests that need them must check.
SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH = os.environ.get(
    'SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH')
SEPARATE_SERVICE_BALANCER_PLUGIN_PATH = os.environ.get(
    'SEPARATE_SERVICE_BALANCER_PLUGIN_PATH')
# Boolean flags parsed from the environment via get_var_as_bool.
FUEL_STATS_CHECK = get_var_as_bool('FUEL_STATS_CHECK', False)
FUEL_STATS_ENABLED = get_var_as_bool('FUEL_STATS_ENABLED', True)

View File

@ -27,10 +27,12 @@ from fuelweb_test.helpers.utils import get_test_method_name
from fuelweb_test.helpers.utils import TimeStat
from fuelweb_test.helpers.ssh_manager import SSHManager
from fuelweb_test.models.environment import EnvironmentModel
from fuelweb_test.settings import EXTERNAL_HAPROXY_TEMPLATE
from fuelweb_test.settings import MULTIPLE_NETWORKS
from fuelweb_test.settings import MULTIPLE_NETWORKS_TEMPLATE
from fuelweb_test.settings import REPLACE_DEFAULT_REPOS
from fuelweb_test.settings import REPLACE_DEFAULT_REPOS_ONLY_ONCE
from fuelweb_test.settings import SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH
from gates_tests.helpers import exceptions
@ -345,6 +347,8 @@ class SetupEnvironment(TestBasic):
if MULTIPLE_NETWORKS:
from system_test.core.discover import load_yaml
self._devops_config = load_yaml(MULTIPLE_NETWORKS_TEMPLATE)
if SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH:
self._devops_config = load_yaml(EXTERNAL_HAPROXY_TEMPLATE)
self.check_run("empty")

View File

@ -0,0 +1,171 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from proboscis import asserts
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.helpers import utils
from fuelweb_test.settings import NEUTRON
from fuelweb_test.settings import NEUTRON_SEGMENT
from fuelweb_test.settings import NODEGROUPS
from fuelweb_test.settings import MIRROR_UBUNTU
from fuelweb_test.settings import MULTIPLE_NETWORKS
from fuelweb_test.settings import SEPARATE_SERVICE_BALANCER_PLUGIN_PATH
from fuelweb_test.settings import SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.test_net_templates_base import TestNetworkTemplatesBase
from gates_tests.helpers import exceptions
@test(groups=["multiracks_2"])
class TestSeparateHaproxy(TestNetworkTemplatesBase):
    """Test for verification of deployment with detached haproxy role."""

    def _upload_and_install_plugins(self):
        """Upload both plugin tarballs to the master node and install them.

        Covers the detach-haproxy and the external load balancer plugins;
        install_plugin_check_code asserts the installation exit code.
        """
        for plugin_path in (SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH,
                            SEPARATE_SERVICE_BALANCER_PLUGIN_PATH):
            utils.upload_tarball(
                ip=self.ssh_manager.admin_ip,
                tar_path=plugin_path,
                tar_target="/var")
            utils.install_plugin_check_code(
                ip=self.ssh_manager.admin_ip,
                plugin=os.path.basename(plugin_path))

    def _enable_plugins(self, cluster_id):
        """Enable both plugins on the cluster, aborting if either is absent.

        :param cluster_id: id of the cluster to enable the plugins on.
        """
        for plugin_name in ('detach_haproxy', 'external_loadbalancer'):
            msg = ("Plugin couldn't be enabled. Check plugin version. "
                   "Test aborted")
            asserts.assert_true(
                self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                msg)
            self.fuel_web.update_plugin_data(
                cluster_id, plugin_name, {'metadata/enabled': True})

    @test(depends_on=[SetupEnvironment.prepare_release],
          groups=["separate_haproxy"])
    @log_snapshot_after_test
    def separate_haproxy(self):
        """Deploy HA environment with separate Haproxy.

        Scenario:
            1. Revert snapshot with ready master node
            2. Copy and install external-lb and detach-haproxy plugins
            3. Bootstrap 3 slaves from default nodegroup
            4. Create cluster with Neutron VXLAN and custom nodegroups
            5. Run 'fuel-mirror' to replace cluster repositories
               with local mirrors
            6. Bootstrap 2 slaves nodes from second nodegroup
               and one node from third node group
            7. Enable plugins for cluster
            8. Add 2 controllers from default nodegroup and 1 controller
               from second node group
            9. Add 1 compute+cinder from default node group
               and 1 compute+cinder from second node group
            10. Add haproxy node from third node group
            11. Verify networks
            12. Deploy cluster

        Duration 120m
        Snapshot separate_haproxy
        """
        if not MULTIPLE_NETWORKS:
            raise exceptions.FuelQAVariableNotSet(
                'MULTIPLE_NETWORKS', 'true')
        # Fail fast with an explicit error instead of crashing inside
        # upload_tarball later when the plugin paths are not provided.
        for var_name, var_value in (
                ('SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH',
                 SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH),
                ('SEPARATE_SERVICE_BALANCER_PLUGIN_PATH',
                 SEPARATE_SERVICE_BALANCER_PLUGIN_PATH)):
            if not var_value:
                raise exceptions.FuelQAVariableNotSet(
                    var_name, 'path to plugin tarball')

        self.show_step(1)
        self.env.revert_snapshot('ready')

        self.show_step(2)
        self._upload_and_install_plugins()

        self.show_step(3)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3])

        self.show_step(4)
        admin_ip = self.ssh_manager.admin_ip
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            settings={
                'net_provider': NEUTRON,
                'net_segment_type': NEUTRON_SEGMENT['tun'],
                'tenant': 'separatehaproxy',
                'user': 'separatehaproxy',
                'password': 'separatehaproxy',
                'ntp_list': [admin_ip],
            }
        )

        self.show_step(5)
        # Point fuel-mirror at the configured Ubuntu mirror before creating
        # local repositories (public networks are routed without internet).
        if MIRROR_UBUNTU != '':
            ubuntu_url = MIRROR_UBUNTU.split()[1]
            replace_cmd = \
                "sed -i 's,http://archive.ubuntu.com/ubuntu,{0},g'" \
                " /usr/share/fuel-mirror/ubuntu.yaml".format(
                    ubuntu_url)
            self.ssh_manager.execute_on_remote(ip=admin_ip,
                                               cmd=replace_cmd)
        create_mirror_cmd = 'fuel-mirror create -P ubuntu -G mos ubuntu'
        self.ssh_manager.execute_on_remote(ip=admin_ip,
                                           cmd=create_mirror_cmd)
        apply_mirror_cmd = 'fuel-mirror apply -P ubuntu -G mos ubuntu ' \
                           '--env {0} --replace'.format(cluster_id)
        self.ssh_manager.execute_on_remote(ip=admin_ip,
                                           cmd=apply_mirror_cmd)

        self.show_step(6)
        # slave-06 is deliberately left unused; slaves[6:7] is slave-07
        # from the third rack, which hosts the standalone haproxy.
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[6:7])

        self.show_step(7)
        self._enable_plugins(cluster_id)

        self.show_step(8)
        self.show_step(9)
        self.show_step(10)
        nodegroup1 = NODEGROUPS[0]['name']
        nodegroup2 = NODEGROUPS[1]['name']
        nodegroup3 = NODEGROUPS[2]['name']
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': [['controller'], nodegroup1],
                'slave-02': [['controller'], nodegroup1],
                'slave-03': [['compute', 'cinder'], nodegroup1],
                'slave-04': [['compute', 'cinder'], nodegroup2],
                'slave-05': [['controller'], nodegroup2],
                'slave-07': [['standalone-haproxy'], nodegroup3]
            }
        )

        self.show_step(11)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(12)
        # OSTF checks are skipped (check_services=False): they are not
        # intended to be run against a detached haproxy setup.
        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60,
                                          check_services=False)
        self.env.make_snapshot('separate_haproxy')

View File

@ -0,0 +1,451 @@
---
aliases:
dynamic_address_pool:
- &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24
default_interface_model:
- &interface_model !os_env INTERFACE_MODEL, e1000
default-slave-interfaces: &default-slave-interfaces
- label: enp0s3
l2_network_device: admin # Libvirt bridge name. It is *NOT* Nailgun networks
interface_model: *interface_model
- label: enp0s4
l2_network_device: public
interface_model: *interface_model
- label: enp0s5
l2_network_device: management
interface_model: *interface_model
- label: enp0s6
l2_network_device: private
interface_model: *interface_model
- label: enp0s7
l2_network_device: storage
interface_model: *interface_model
- label: enp0s13
l2_network_device: public3
interface_model: *interface_model
- label: enp0s14
l2_network_device: management3
interface_model: *interface_model
rack-02-slave-interfaces: &rack-02-slave-interfaces
- label: enp0s3
l2_network_device: admin2 # Libvirt bridge name. It is *NOT* Nailgun networks
interface_model: *interface_model
- label: enp0s4
l2_network_device: public2
interface_model: *interface_model
- label: enp0s5
l2_network_device: management2
interface_model: *interface_model
- label: enp0s6
l2_network_device: private2
interface_model: *interface_model
- label: enp0s7
l2_network_device: storage
interface_model: *interface_model
- label: enp0s13
l2_network_device: public3
interface_model: *interface_model
- label: enp0s14
l2_network_device: management3
interface_model: *interface_model
rack-03-slave-interfaces: &rack-03-slave-interfaces
- label: enp0s3
l2_network_device: admin3 # Libvirt bridge name. It is *NOT* Nailgun networks
interface_model: *interface_model
- label: enp0s4
l2_network_device: public3
interface_model: *interface_model
- label: enp0s5
l2_network_device: management3
interface_model: *interface_model
- label: enp0s6
l2_network_device: private3
interface_model: *interface_model
- label: enp0s7
l2_network_device: storage
interface_model: *interface_model
- label: enp0s13
l2_network_device: public
interface_model: *interface_model
- label: enp0s14
l2_network_device: management
interface_model: *interface_model
- label: enp0s15
l2_network_device: public2
interface_model: *interface_model
- label: enp0s16
l2_network_device: management2
interface_model: *interface_model
default-slave-network_config: &default-slave-network_config
enp0s3:
networks:
- fuelweb_admin
enp0s4:
networks:
- public
enp0s5:
networks:
- management
enp0s6:
networks:
- private
enp0s7:
networks:
- storage
rack-02-slave-network_config: &rack-02-slave-network_config
enp0s3:
networks:
- fuelweb_admin2
enp0s4:
networks:
- public2
enp0s5:
networks:
- management2
enp0s6:
networks:
- private2
enp0s7:
networks:
- storage
rack-03-slave-network_config: &rack-03-slave-network_config
enp0s3:
networks:
- fuelweb_admin3
enp0s4:
networks:
- public3
enp0s5:
networks:
- management3
enp0s6:
networks:
- private3
enp0s7:
networks:
- storage
default-slave-node-params: &default-slave-node-params
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
- name: cinder
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
- name: swift
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
interfaces: *default-slave-interfaces
network_config: *default-slave-network_config
rack-02-slave-node-params: &rack-02-slave-node-params
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
- name: cinder
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
- name: swift
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
interfaces: *rack-02-slave-interfaces
network_config: *rack-02-slave-network_config
rack-03-slave-node-params: &rack-03-slave-node-params
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
- name: cinder
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
- name: swift
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
interfaces: *rack-03-slave-interfaces
network_config: *rack-03-slave-network_config
env_name: !os_env ENV_NAME
address_pools:
# Network pools used by the environment
fuelweb_admin-pool01:
net: *pool_default
params:
tag: 0
public-pool01:
net: *pool_default
params:
tag: 0
storage-pool01:
net: *pool_default
params:
tag: 101
management-pool01:
net: *pool_default
params:
tag: 102
private-pool01:
net: *pool_default
params:
tag: 103
fuelweb_admin-pool02:
net: *pool_default
params:
tag: 0
public-pool02:
net: *pool_default
params:
tag: 0
management-pool02:
net: *pool_default
params:
tag: 102
private-pool02:
net: *pool_default
params:
tag: 103
fuelweb_admin-pool03:
net: *pool_default
params:
tag: 0
public-pool03:
net: *pool_default
params:
tag: 0
management-pool03:
net: *pool_default
params:
tag: 102
private-pool03:
net: *pool_default
params:
tag: 103
groups:
- name: default
driver:
name: devops.driver.libvirt.libvirt_driver
params:
connection_string: !os_env CONNECTION_STRING, qemu:///system
storage_pool_name: !os_env STORAGE_POOL_NAME, default
stp: True
hpet: False
use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
network_pools: # Address pools for OpenStack networks.
# Actual names should be used for keys
# (the same as in Nailgun, for example)
fuelweb_admin: fuelweb_admin-pool01
public: public-pool01
storage: storage-pool01
management: management-pool01
private: private-pool01
l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks
admin:
address_pool: fuelweb_admin-pool01
dhcp: false
forward:
mode: nat
public:
address_pool: public-pool01
dhcp: false
forward:
mode: route
storage:
address_pool: storage-pool01
dhcp: false
forward:
mode: route
management:
address_pool: management-pool01
dhcp: false
forward:
mode: route
private:
address_pool: private-pool01
dhcp: false
forward:
mode: route
nodes:
- name: admin # Custom name of VM for Fuel admin node
role: fuel_master # Fixed role for Fuel master node properties
params:
vcpu: !os_env ADMIN_NODE_CPU, 2
memory: !os_env ADMIN_NODE_MEMORY, 3072
boot:
- hd
- cdrom # for boot from usb - without 'cdrom'
volumes:
- name: system
capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 80
format: qcow2
- name: iso
source_image: !os_env ISO_PATH # if 'source_image' set, then volume capacity is calculated from it's size
format: raw
device: cdrom # for boot from usb - 'disk'
bus: ide # for boot from usb - 'usb'
interfaces:
- label: enp0s3
l2_network_device: admin # Libvirt bridge name. It is *NOT* a Nailgun network
interface_model: *interface_model
- label: enp0s4
l2_network_device: admin2
interface_model: *interface_model
- label: enp0s5
l2_network_device: admin3
interface_model: *interface_model
network_config:
enp0s3:
networks:
- fuelweb_admin
- name: slave-01
role: fuel_slave
params: *default-slave-node-params
- name: slave-02
role: fuel_slave
params: *default-slave-node-params
- name: slave-03
role: fuel_slave
params: *default-slave-node-params
- name: rack-02
driver:
name: devops.driver.libvirt.libvirt_driver
params:
connection_string: !os_env CONNECTION_STRING, qemu:///system
storage_pool_name: !os_env STORAGE_POOL_NAME, default
stp: True
hpet: False
use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
network_pools: # Address pools for OpenStack networks.
# Actual names should be used for keys
# (the same as in Nailgun, for example)
fuelweb_admin: fuelweb_admin-pool02
public: public-pool02
storage: storage-pool01
management: management-pool02
private: private-pool02
l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks
admin2:
address_pool: fuelweb_admin-pool02
dhcp: false
forward:
mode: nat
public2:
address_pool: public-pool02
dhcp: false
forward:
mode: route
management2:
address_pool: management-pool02
dhcp: false
forward:
mode: route
private2:
address_pool: private-pool02
dhcp: false
forward:
mode: route
nodes:
- name: slave-04
role: fuel_slave
params: *rack-02-slave-node-params
- name: slave-05
role: fuel_slave
params: *rack-02-slave-node-params
- name: slave-06
role: fuel_slave
params: *rack-02-slave-node-params
- name: rack-03
driver:
name: devops.driver.libvirt.libvirt_driver
params:
connection_string: !os_env CONNECTION_STRING, qemu:///system
storage_pool_name: !os_env STORAGE_POOL_NAME, default
stp: True
hpet: False
use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
network_pools: # Address pools for OpenStack networks.
# Actual names should be used for keys
# (the same as in Nailgun, for example)
fuelweb_admin: fuelweb_admin-pool03
public: public-pool03
storage: storage-pool01
management: management-pool03
private: private-pool03
l2_network_devices: # Libvirt bridges. It is *NOT* Nailgun networks
admin3:
address_pool: fuelweb_admin-pool03
dhcp: false
forward:
mode: nat
public3:
address_pool: public-pool03
dhcp: false
forward:
mode: route
management3:
address_pool: management-pool03
dhcp: false
forward:
mode: route
private3:
address_pool: private-pool03
dhcp: false
forward:
mode: route
nodes:
- name: slave-07
role: fuel_slave
params: *rack-03-slave-node-params

View File

@ -0,0 +1,44 @@
---
network-config: &network-config
provider: neutron
segment-type: tun
pubip-to-all: false
storages-config: &storages-config
volume-lvm: false
volume-ceph: true
image-ceph: true
rados-ceph: true
ephemeral-ceph: false
replica-ceph: 2
nodes: &nodes
- roles:
- controller
count: 3
nodegroup: default
- roles:
- compute
count: 1
nodegroup: rack-02
- roles:
- ceph-osd
count: 2
nodegroup: rack-03
template:
name: 3 Controller, 1 Compute, 2 Ceph on Neutron/TUN
slaves: 6
devops_settings: !include devops_configs/external_haproxy.yaml
cluster_template: &environment-config
name: env1
release: ubuntu
settings:
components:
sahara: false
murano: false
ceilometer: false
storages: *storages-config
network: *network-config
nodes: *nodes