Test public API

Add two tests with the following steps:

Deploy environment with enabled DMZ network for API.

        Scenario:
            1. Revert snapshot with ready master node
            2. Create new environment
            3. Run network verification
            4. Deploy the environment
            5. Run network verification
            6. Run OSTF
            7. Reboot cluster nodes
            8. Run OSTF
            9. Create environment snapshot deploy_env_with_public_api

Check that security rules are properly applied for DMZ network

        Scenario:
            1. Revert snapshot from previous test
            2. Run instance
            3. Try to access horizon from instance
            4. Remove instance

Implements: blueprint test-separate-public-floating
Change-Id: I70474b5cab324aa4f4a042127d4e6961c95010bf
This commit is contained in:
Maksym Strukov 2016-07-19 23:22:55 +03:00
parent 48b7a15dac
commit 56e6b2b7cd
9 changed files with 675 additions and 1 deletions

View File

@ -272,6 +272,11 @@ Test offloading types
.. automodule:: fuelweb_test.tests.test_offloading_types
:members:
Test public API
---------------
.. automodule:: fuelweb_test.tests.test_public_api
:members:
Test Pull Requests
------------------
.. automodule:: fuelweb_test.tests.test_pullrequest

View File

@ -64,7 +64,8 @@ def check_cinder_status(ip):
cmd = '. openrc; cinder service-list'
result = ssh_manager.execute_on_remote(
ip=ip,
cmd=cmd
cmd=cmd,
raise_on_assert=False
)
cinder_services = result['stdout_str']
logger.debug('>$ cinder service-list\n{}'.format(cinder_services))

View File

@ -3004,6 +3004,15 @@ class FuelWebClient29(object):
if force_exception:
raise
def get_network_pool(self, pool_name, group_name=None):
    """Return gateway/CIDR info for the named devops network.

    fuel-devops 2.x lookup: networks live directly on the devops
    environment, so ``group_name`` is accepted only to keep the
    signature compatible with the fuel-devops 3.0 client.

    :param pool_name: name of the devops network to look up
    :param group_name: ignored in this implementation
    :return: dict with 'gateway' and 'network' keys
    """
    network = self.environment.d_env.get_network(name=pool_name)
    return {
        "gateway": network.default_gw,
        "network": network.ip_network,
    }
class FuelWebClient30(FuelWebClient29):
"""FuelWebClient that works with fuel-devops 3.0
@ -3278,6 +3287,17 @@ class FuelWebClient30(FuelWebClient29):
return network_configuration
@logwrap
def get_network_pool(self, pool_name, group_name='default'):
    """Return gateway/CIDR info for a pool in a fuel-devops 3.0 group.

    fuel-devops 3.0 lookup: network pools are attached to node groups,
    so the pool is resolved through the named group first.

    :param pool_name: name of the network pool inside the node group
    :param group_name: devops node group to search (default: 'default')
    :return: dict with 'gateway' and 'network' keys
    """
    node_group = self.environment.d_env.get_group(name=group_name)
    pool = node_group.get_network_pool(name=pool_name)
    return {
        "gateway": pool.gateway,
        "network": pool.ip_range,
    }
# TODO(ddmitriev): this code will be removed after moving to fuel-devops3.0
# pylint: disable=no-member
# noinspection PyUnresolvedReferences

View File

@ -0,0 +1,168 @@
# Fuel "advanced network template" for the public-API (DMZ) tests.
# Adds a dedicated os-api network (bridge br-osapi) carrying the
# public/vip role, so the OpenStack API VIP lives in a DMZ network
# separated from the regular public bridge (br-ex).
adv_net_template:
  default:
    # Logical Nailgun network -> endpoint bridge on the node.
    network_assignments:
      fuelweb_admin:
        ep: br-fw-admin
      management:
        ep: br-mgmt
      os-api:
        ep: br-osapi
      private:
        ep: br-prv
      public:
        ep: br-ex
      storage:
        ep: br-storage
    network_scheme:
      admin:
        endpoints:
          - br-fw-admin
        roles:
          admin/pxe: br-fw-admin
          fw-admin: br-fw-admin
        transformations:
          - action: add-br
            name: br-fw-admin
          - action: add-port
            bridge: br-fw-admin
            name: <% if1 %>
      management:
        endpoints:
          - br-mgmt
        roles:
          ceilometer/api: br-mgmt
          ceph/radosgw: br-mgmt
          ceph/replication: br-mgmt
          cinder/api: br-mgmt
          cinder/iscsi: br-mgmt
          glance/api: br-mgmt
          heat/api: br-mgmt
          horizon: br-mgmt
          ironic/api: br-mgmt
          ironic/baremetal: br-mgmt
          keystone/api: br-mgmt
          management: br-mgmt
          mgmt/api: br-mgmt
          mgmt/corosync: br-mgmt
          mgmt/database: br-mgmt
          mgmt/memcache: br-mgmt
          mgmt/messaging: br-mgmt
          mgmt/vip: br-mgmt
          mongo/db: br-mgmt
          murano/api: br-mgmt
          murano/cfapi: br-mgmt
          neutron/api: br-mgmt
          neutron/mesh: br-mgmt
          nova/api: br-mgmt
          nova/migration: br-mgmt
          sahara/api: br-mgmt
          swift/api: br-mgmt
          swift/replication: br-mgmt
        transformations:
          - action: add-br
            name: br-mgmt
          - action: add-port
            bridge: br-mgmt
            name: <% if4 %>
      # DMZ network: only the public/vip role is placed here.
      os-api:
        endpoints:
          - br-osapi
        roles:
          public/vip: br-osapi
        transformations:
          - action: add-br
            name: br-osapi
          - action: add-port
            bridge: br-osapi
            name: <% if3 %>
      private:
        endpoints:
          - br-prv
        roles:
          neutron/private: br-prv
          private: br-prv
        transformations:
          - action: add-br
            name: br-prv
            provider: ovs
          - action: add-br
            name: br-aux
          - action: add-patch
            bridges:
              - br-prv
              - br-aux
            mtu: 65000
            provider: ovs
          - action: add-port
            bridge: br-aux
            name: <% if5 %>
      public:
        endpoints:
          - br-ex
        roles:
          ex: br-ex
          neutron/floating: br-ex
          public: br-ex
        transformations:
          - action: add-br
            name: br-ex
          - action: add-br
            name: br-floating
            provider: ovs
          - action: add-patch
            bridges:
              - br-floating
              - br-ex
            mtu: 65000
            provider: ovs
          - action: add-port
            bridge: br-ex
            name: <% if2 %>
      storage:
        endpoints:
          - br-storage
        roles:
          ceph/public: br-storage
          ceph/radosgw: br-storage
          ceph/replication: br-storage
          storage: br-storage
        transformations:
          - action: add-br
            name: br-storage
          # storage rides a VLAN-tagged subinterface (.101)
          - action: add-port
            bridge: br-storage
            name: <% if6 %>.101
    # Host NICs substituted into the <% ifN %> placeholders above.
    nic_mapping:
      default:
        if1: enp0s3  # admin
        if2: enp0s4  # public
        if3: enp0s5  # os-api
        if4: enp0s6  # management
        if5: enp0s7  # private
        if6: enp0s8  # storage
    # Networks each node role receives; note that only controllers
    # are attached to the os-api (DMZ) network.
    templates_for_node_role:
      ceph-osd:
        - admin
        - public
        - management
        - private
        - storage
      cinder:
        - admin
        - public
        - management
        - private
        - storage
      compute:
        - admin
        - public
        - management
        - private
        - storage
      controller:
        - admin
        - public
        - os-api
        - management
        - private
        - storage

View File

@ -135,6 +135,12 @@ if MULTIPATH and not SLAVE_MULTIPATH_DISKS_COUNT:
SLAVE_MULTIPATH_DISKS_COUNT = int(
os.environ.get('SLAVE_MULTIPATH_DISKS_COUNT'))
# Toggle the DMZ ("os-api") network feature for test environments.
ENABLE_DMZ = get_var_as_bool('ENABLE_DMZ', False)
# Path to the system-test config describing the DMZ-enabled environment;
# defaults to the public_api.yaml template relative to the working dir.
ENABLE_DMZ_TEMPLATE = os.environ.get(
    'ENABLE_DMZ_TEMPLATE',
    os.path.join(os.getcwd(),
                 'system_test/tests_templates/tests_configs/public_api.yaml'))
MULTIPLE_NETWORKS = get_var_as_bool('MULTIPLE_NETWORKS', False)
MULTIPLE_NETWORKS_TEMPLATE = os.environ.get(
'MULTIPLE_NETWORKS_TEMPLATE',

View File

@ -35,6 +35,8 @@ from fuelweb_test.settings import CUSTOM_FUEL_SETTING_YAML
from fuelweb_test.settings import EXTERNAL_HAPROXY_TEMPLATE
from fuelweb_test.settings import MULTIPLE_NETWORKS
from fuelweb_test.settings import MULTIPLE_NETWORKS_TEMPLATE
from fuelweb_test.settings import ENABLE_DMZ
from fuelweb_test.settings import ENABLE_DMZ_TEMPLATE
from fuelweb_test.settings import REPLACE_DEFAULT_REPOS
from fuelweb_test.settings import REPLACE_DEFAULT_REPOS_ONLY_ONCE
from fuelweb_test.settings import SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH
@ -361,6 +363,9 @@ class SetupEnvironment(TestBasic):
if USE_HAPROXY_TEMPLATE and SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH:
from system_test.core.discover import load_yaml
self._devops_config = load_yaml(EXTERNAL_HAPROXY_TEMPLATE)
if ENABLE_DMZ:
from system_test.core.discover import load_yaml
self._devops_config = load_yaml(ENABLE_DMZ_TEMPLATE)
self.check_run("empty")

View File

@ -0,0 +1,222 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from devops.error import TimeoutError
from devops.helpers.helpers import tcp_ping
from devops.helpers.helpers import wait
from devops.helpers.helpers import wait_pass
from devops.helpers.ssh_client import SSHAuth
from proboscis import asserts
from proboscis import test
from fuelweb_test import logger
from fuelweb_test import settings
from fuelweb_test.helpers import os_actions
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.helpers import utils
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.test_net_templates_base import TestNetworkTemplatesBase
# SSH credentials for logging into the cirros-based test instance.
cirros_auth = SSHAuth(**settings.SSH_IMAGE_CREDENTIALS)
@test(groups=["public_api"])
class TestPublicApi(TestNetworkTemplatesBase):
    """Tests for the DMZ ("os-api") network feature of the public API."""

    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=['deploy_env_with_public_api'])
    @log_snapshot_after_test
    def deploy_env_with_public_api(self):
        """Deploy environment with enabled DMZ network for API.

        Scenario:
            1. Revert snapshot with ready master node
            2. Create new environment
            3. Run network verification
            4. Deploy the environment
            5. Run network verification
            6. Run OSTF
            7. Reboot cluster nodes
            8. Run OSTF
            9. Create environment snapshot deploy_env_with_public_api

        Duration 120m
        Snapshot deploy_env_with_public_api
        """
        asserts.assert_true(settings.ENABLE_DMZ,
                            "ENABLE_DMZ variable wasn't exported")
        self.check_run('deploy_env_with_public_api')

        self.show_step(1)
        self.env.revert_snapshot('ready_with_5_slaves')

        self.show_step(2)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
        )
        # Interfaces are configured by the network template, so skip the
        # default interface assignment here.
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder'],
            },
            update_interfaces=False
        )

        network_template = utils.get_network_template('public_api')
        self.fuel_web.client.upload_network_template(
            cluster_id=cluster_id, network_template=network_template)

        # Register the dedicated DMZ network group ('os-api') in Nailgun
        # using the gateway/CIDR taken from the devops network pool.
        net = self.fuel_web.get_network_pool('os-api')
        nodegroup = self.fuel_web.get_nodegroup(cluster_id)
        os_api_template = {
            "group_id": nodegroup['id'],
            "name": 'os-api',
            "cidr": net['network'],
            "gateway": net['gateway'],
            "meta": {
                'notation': 'cidr',
                'render_type': None,
                'map_priority': 2,
                'configurable': True,
                'use_gateway': True,
                'name': 'os-api',
                'cidr': net['network'],
                'vlan_start': None,
                'vips': ['haproxy']
            }
        }
        self.fuel_web.client.add_network_group(os_api_template)
        logger.debug('Networks: {0}'.format(
            self.fuel_web.client.get_network_groups()))

        self.show_step(3)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(4)
        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)
        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(6)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(7)
        nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        self.fuel_web.warm_restart_nodes(
            self.fuel_web.get_devops_nodes_by_nailgun_nodes(nodes))

        controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id=cluster_id,
            roles=['controller']
        )[0]
        controller_devops = \
            self.fuel_web.get_devops_node_by_nailgun_node(controller)

        # Wait until MySQL Galera is UP on some controller
        self.fuel_web.wait_mysql_galera_is_up([controller_devops.name])

        # Wait until Cinder services UP on a controller
        self.fuel_web.wait_cinder_is_up([controller_devops.name])

        # After the reboot OSTF may fail transiently while services come
        # back; retry sanity/smoke until they pass before the final run.
        wait_pass(
            lambda: self.fuel_web.run_ostf(cluster_id,
                                           test_sets=['sanity', 'smoke']),
            interval=10,
            timeout=12 * 60
        )
        self.show_step(8)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.show_step(9)
        self.env.make_snapshot('deploy_env_with_public_api', is_make=True)

    @test(depends_on=[deploy_env_with_public_api],
          groups=['public_api_check_security_rules'])
    @log_snapshot_after_test
    def public_api_check_security_rules(self):
        """Check that security rules are properly applied for DMZ network

        Scenario:
            1. Revert snapshot from previous test
            2. Run instance
            3. Try to access horizon from instance
            4. Remove instance
        """
        self.show_step(1)
        self.env.revert_snapshot('deploy_env_with_public_api')

        self.show_step(2)
        cluster_id = self.fuel_web.get_last_created_cluster()
        controller_ip = self.fuel_web.get_public_vip(cluster_id)
        os_conn = os_actions.OpenStackActions(
            controller_ip,
            user='admin',
            passwd='admin',
            tenant='admin')

        # create instance
        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        vm = os_conn.create_server_for_migration(neutron=True, label=net_name)

        # Check if instance active
        os_conn.verify_instance_status(vm, 'ACTIVE')

        vm_floating_ip = os_conn.assign_floating_ip(vm)
        logger.info('Trying to get vm via tcp.')
        try:
            wait(lambda: tcp_ping(vm_floating_ip.ip, 22), timeout=120)
        except TimeoutError:
            raise TimeoutError('Can not ping instance'
                               ' by floating ip {0}'.format(vm_floating_ip.ip))
        logger.info('VM is accessible via ip: {0}'.format(vm_floating_ip.ip))

        self.show_step(3)
        attributes = self.fuel_web.client.get_cluster_attributes(cluster_id)
        # BUGFIX: the original condition was inverted — it selected 'https'
        # when the public_ssl flag for Horizon was False. Use HTTPS exactly
        # when SSL termination for Horizon is enabled.
        protocol = 'https' if attributes[
            'editable']['public_ssl']['horizon']['value'] else 'http'
        cmd = 'curl -I ' \
              '{proto}://{ip}/horizon --insecure'.format(proto=protocol,
                                                         ip=controller_ip)
        logger.info('Trying to access horizon from instance: {}'.format(cmd))

        controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id=cluster_id,
            roles=['controller']
        )[0]
        ssh = self.fuel_web.get_ssh_for_nailgun_node(controller)
        # Run curl from inside the instance (via the controller as a jump
        # host) to prove the DMZ security rules allow reaching Horizon.
        res = ssh.execute_through_host(hostname=vm_floating_ip.ip,
                                       cmd=cmd,
                                       auth=cirros_auth)
        logger.info(res.stdout)
        asserts.assert_equal(res.exit_code, 0,
                             "Instance can't access "
                             "horizon via DMZ network")

        self.show_step(4)
        # delete instance
        os_conn.delete_instance(vm)
        os_conn.verify_srv_deleted(vm)

View File

@ -0,0 +1,206 @@
---
# fuel-devops environment template for the public-API (DMZ) tests:
# one Fuel master plus nine slave VMs; each slave has six NICs —
# admin, public, os-api (DMZ), storage, management and private.
aliases:
  dynamic_address_pool:
    - &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24
  default_interface_model:
    - &interface_model !os_env INTERFACE_MODEL, e1000
  rack-01-slave-interfaces: &rack-01-slave-interfaces
    - label: eth0
      l2_network_device: admin  # Libvirt bridge name. It is *NOT* Nailgun networks
      interface_model: *interface_model
    - label: eth1
      l2_network_device: public
      interface_model: *interface_model
    - label: eth2
      l2_network_device: os-api
      interface_model: *interface_model
    - label: eth3
      l2_network_device: storage
      interface_model: *interface_model
    - label: eth4
      l2_network_device: management
      interface_model: *interface_model
    - label: eth5
      l2_network_device: private
      interface_model: *interface_model
  rack-01-slave-network_config: &rack-01-slave-network_config
    eth0:
      networks:
        - fuelweb_admin
    eth1:
      networks:
        - public
    eth2:
      networks:
        - os-api
    eth3:
      networks:
        - storage
    eth4:
      networks:
        - management
    eth5:
      networks:
        - private
  rack-01-slave-node-params: &rack-01-slave-node-params
    vcpu: !os_env SLAVE_NODE_CPU, 2
    memory: !os_env SLAVE_NODE_MEMORY, 3072
    boot:
      - network
      - hd
    volumes:
      - name: system
        capacity: !os_env NODE_VOLUME_SIZE, 55
        format: qcow2
      - name: cinder
        capacity: !os_env NODE_VOLUME_SIZE, 55
        format: qcow2
      - name: swift
        capacity: !os_env NODE_VOLUME_SIZE, 55
        format: qcow2
    interfaces: *rack-01-slave-interfaces
    network_config: *rack-01-slave-network_config

env_name: !os_env ENV_NAME

address_pools:
  # Network pools used by the environment
  fuelweb_admin-pool01:
    net: *pool_default
    params:
      tag: 0
  public-pool01:
    net: *pool_default
    params:
      tag: 0
  os-api-pool01:
    net: *pool_default
    params:
      tag: 0
  storage-pool01:
    net: *pool_default
    params:
      tag: 101
  management-pool01:
    net: *pool_default
    params:
      tag: 102
  private-pool01:
    net: *pool_default
    params:
      tag: 103

groups:
  - name: rack-01
    driver:
      name: devops.driver.libvirt.libvirt_driver
      params:
        connection_string: !os_env CONNECTION_STRING, qemu:///system
        storage_pool_name: !os_env STORAGE_POOL_NAME, default
        stp: True
        hpet: False
        use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true

    network_pools:  # Address pools for OpenStack networks.
      # Actual names should be used for keys
      # (the same as in Nailgun, for example)
      fuelweb_admin: fuelweb_admin-pool01
      public: public-pool01
      os-api: os-api-pool01
      storage: storage-pool01
      management: management-pool01
      private: private-pool01

    l2_network_devices:  # Libvirt bridges. It is *NOT* Nailgun networks
      admin:
        address_pool: fuelweb_admin-pool01
        dhcp: false
        forward:
          mode: nat
      public:
        address_pool: public-pool01
        dhcp: false
        forward:
          mode: nat
      os-api:
        address_pool: os-api-pool01
        dhcp: false
        forward:
          mode: nat
      storage:
        address_pool: storage-pool01
        dhcp: false
      management:
        address_pool: management-pool01
        dhcp: false
      private:
        address_pool: private-pool01
        dhcp: false

    nodes:
      - name: admin  # Custom name of VM for Fuel admin node
        role: fuel_master  # Fixed role for Fuel master node properties
        params:
          vcpu: !os_env ADMIN_NODE_CPU, 2
          memory: !os_env ADMIN_NODE_MEMORY, 3072
          boot:
            - hd
            - cdrom  # for boot from usb - without 'cdrom'
          volumes:
            - name: system
              capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 80
              format: qcow2
            - name: iso
              source_image: !os_env ISO_PATH  # if 'source_image' set, then volume capacity is calculated from it's size
              format: raw
              device: cdrom  # for boot from usb - 'disk'
              bus: ide  # for boot from usb - 'usb'
          interfaces:
            - label: eth0
              l2_network_device: admin  # Libvirt bridge name. It is *NOT* a Nailgun network
              interface_model: *interface_model
          network_config:
            eth0:
              networks:
                - fuelweb_admin

      # Nine identical slave VMs sharing the anchored parameters above.
      - name: slave-01
        role: fuel_slave
        params: *rack-01-slave-node-params
      - name: slave-02
        role: fuel_slave
        params: *rack-01-slave-node-params
      - name: slave-03
        role: fuel_slave
        params: *rack-01-slave-node-params
      - name: slave-04
        role: fuel_slave
        params: *rack-01-slave-node-params
      - name: slave-05
        role: fuel_slave
        params: *rack-01-slave-node-params
      - name: slave-06
        role: fuel_slave
        params: *rack-01-slave-node-params
      - name: slave-07
        role: fuel_slave
        params: *rack-01-slave-node-params
      - name: slave-08
        role: fuel_slave
        params: *rack-01-slave-node-params
      - name: slave-09
        role: fuel_slave
        params: *rack-01-slave-node-params
View File

@ -0,0 +1,41 @@
---
# System-test configuration for the public-API (DMZ) scenario:
# 1 controller + 1 compute + 1 cinder on Neutron/VLAN, LVM storage,
# deployed on the DMZ-enabled devops environment.
network-config: &network-config
  provider: neutron
  segment-type: vlan
  pubip-to-all: false

storages-config: &storages-config
  volume-lvm: true
  volume-ceph: false
  image-ceph: false
  ephemeral-ceph: false
  rados-ceph: false
  replica-ceph: 2

nodes: &nodes
  - roles:
      - controller
    count: 1
  - roles:
      - compute
    count: 1
  - roles:
      - cinder
    count: 1

template:
  name: 1 Controller, 1 Compute, 1 Cinder on Neutron/VLAN with DMZ
  slaves: 3
  # Reuses the DMZ devops environment definition.
  devops_settings: !include devops_configs/public_api.yaml

  cluster_template: &environment-config
    name: env1
    release: ubuntu
    settings:
      components:
        sahara: false
        murano: false
        ceilometer: false
      storages: *storages-config
      network: *network-config
    nodes: *nodes