Minimal(ist) network config for nspawn gating

Nspawn containers can attach directly to the physical host interface
using macvlan which allows us to greatly simplify connectivity in test
instances.

Changes to the user variable files have been made to allow the services
to function on a simplified stack.

Depends-On: I083042a791d9213b9b1872a239dc18dc6c7ae46e
Change-Id: Iaa7cfb051d26bcd080df15ef8949d3cf16208ea9
Signed-off-by: Kevin Carter <kevin.carter@rackspace.com>
This commit is contained in:
Kevin Carter 2018-08-18 00:14:55 -05:00 committed by Jesse Pretorius
parent 9d10db61d0
commit 4a07e2612e
9 changed files with 159 additions and 5 deletions

View File

@ -2,8 +2,8 @@
global_overrides:
swift:
part_power: 8
storage_network: 'br-storage'
replication_network: 'br-storage'
storage_network: "{{ (container_tech != 'nspawn') | ternary('br-storage', ansible_default_ipv4['alias']) }}"
replication_network: "{{ (container_tech != 'nspawn') | ternary('br-storage', ansible_default_ipv4['alias']) }}"
drives:
- name: swift1.img
- name: swift2.img

View File

@ -0,0 +1,137 @@
---
cidr_networks:
  container: "172.29.236.0/22"
  flat: "172.29.240.0/22"
  storage: "172.29.244.0/22"
  vxlan: "172.29.248.0/22"

used_ips:
  - 172.29.236.100,172.29.236.200
  - 172.29.240.110,172.29.240.200
  - 172.29.241.0,172.29.241.254
  - 172.29.242.100,172.29.242.200
  - 172.29.244.100,172.29.244.200
  - 172.29.248.100,172.29.248.200

global_overrides:
  internal_lb_vip_address: 172.29.236.100
  external_lb_vip_address: "{{ bootstrap_host_public_address | default(ansible_default_ipv4['address']) }}"
  tunnel_bridge: "mv-vxlan"
  management_bridge: "mv-{{ ansible_default_ipv4['alias'] }}"
  provider_networks:
    # Management network: macvlan interfaces hung directly off the host's
    # default interface instead of a dedicated bridge.
    - network:
        container_bridge: "{{ ansible_default_ipv4['alias'] }}"
        container_interface: "mv-container"
        ip_from_q: "container"
        type: "raw"
        is_container_address: true
        group_binds:
          - all_containers
          - hosts
    - network:
        container_bridge: "{{ ansible_default_ipv4['alias'] }}"
        container_interface: "mv-flat"
        ip_from_q: "flat"
        type: "flat"
        net_name: "flat"
        group_binds:
          - neutron_linuxbridge_agent
          - octavia-worker
          - octavia-housekeeping
          - octavia-health-manager
          - rabbitmq
          - utility_all
    - network:
        container_bridge: "{{ ansible_default_ipv4['alias'] }}"
        container_interface: "mv-storage"
        ip_from_q: "storage"
        type: "raw"
        is_container_address: true
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute
          - swift_proxy
    - network:
        container_bridge: "{{ ansible_default_ipv4['alias'] }}"
        container_interface: "mv-vxlan"
        ip_from_q: "vxlan"
        type: "vxlan"
        net_name: "vxlan"
        range: "1:1000"
        group_binds:
          - neutron_linuxbridge_agent

###
### Anchors
###
# NOTE(cloudnull): This section is only present as a way to show how blocks
#                  can be used to simplify config when hosts serve multiple
#                  roles. In this case the AIO block is defined once and used
#                  for all other components without needing to duplicate
#                  config.
aio_block: &aio_info_block
  aio1:
    ip: 172.29.236.100
    host_vars:
      # NOTE(cloudnull): local connection is used in the gate because of how
      #                  the current gate scripts are structured. Once inline
      #                  fact gathering is moved/removed this option can be
      #                  removed.
      ansible_connection: local
      #
      # NOTE(cloudnull): Currently set to false to speed up the test, set this
      #                  to true at a later date, when confidence is higher.
      apply_security_hardening: false
      #
      # NOTE(cloudnull): This is an example of an extra network that will be
      #                  created on the host. This is used to set up a local
      #                  host for testing with tempest and not something that
      #                  is used in production.
      #
      #                  + The flat network configuration option provided here
      #                    was created so a user can interact with a local
      #                    flat network without any manual intervention. In a
      #                    production setup with VLAN tagged interfaces or
      #                    multiple nics this would not be required.
      #
      #                  + The "vxlan" network configuration option is
      #                    provided so test instances can be built using l3
      #                    networks. In a normal production environment this
      #                    would not be needed as a physical device (or vlan
      #                    tagged interface) would be used and should already
      #                    have an IP address assigned to it.
      container_extra_networks:
        flat_network:
          bridge: "{{ ansible_default_ipv4['alias'] }}"
          interface: mv-flat
          address: 172.29.240.100
          netmask: 255.255.252.0
        storage_network:
          bridge: "{{ ansible_default_ipv4['alias'] }}"
          interface: mv-storage
          address: 172.29.244.100
          netmask: 255.255.252.0
        vxlan_network:
          bridge: "{{ ansible_default_ipv4['alias'] }}"
          interface: mv-vxlan
          address: 172.29.248.100
          netmask: 255.255.252.0
    # NOTE(review): container_vars placed as a sibling of host_vars under
    #               aio1, per the standard openstack_user_config host-entry
    #               layout — confirm against the rendered inventory.
    container_vars:
      # Optional | container_tech for a target host, default is "lxc".
      container_tech: "nspawn"

###
### Infrastructure
###
# galera, memcache, rabbitmq, utility
shared-infra_hosts: *aio_info_block
# repository (apt cache, python packages, etc)
repo-infra_hosts: *aio_info_block
# rsyslog server
log_hosts: *aio_info_block

View File

@ -616,7 +616,7 @@ def _add_additional_networks(key, inventory, ip_q, q_name, netmask, interface,
is_metal = properties.get('is_metal', False)
_network = network_entry(
is_metal,
is_metal and (container.get('container_tech') != 'nspawn'),
interface,
bridge,
net_type,

View File

@ -81,6 +81,8 @@
- ansible_eth12['active'] == true
- ansible_eth13['active'] == true
- ansible_eth14['active'] == true
when:
- (container_tech | default('unknown')) != 'nspawn'
vars_files:
- "{{ playbook_dir }}/../playbooks/defaults/repo_packages/openstack_services.yml"
- vars/bootstrap-aio-vars.yml

View File

@ -131,6 +131,8 @@
# Prepare the network interfaces
- include: prepare_networking.yml
when:
- container_tech != 'nspawn'
tags:
- prepare-networking

View File

@ -34,7 +34,7 @@
- name: Deploy openstack_user_config
config_template:
src: "{{ bootstrap_host_aio_config_path }}/openstack_user_config.yml.aio.j2"
src: "{{ bootstrap_host_aio_config_path }}/openstack_user_config.yml.{{ (container_tech == 'nspawn') | ternary('aio-nspawn', 'aio') }}.j2"
dest: "/etc/openstack_deploy/openstack_user_config.yml"
config_overrides: "{{ openstack_user_config_overrides | default({}) }}"
config_type: "yaml"

View File

@ -19,9 +19,15 @@ debug: True
## Installation method for OpenStack services
install_method: "{{ lookup('env','INSTALL_METHOD') | default('source', true) }}"
## Tempest settings
{% if container_tech == 'nspawn' %}
tempest_public_subnet_cidr: "172.29.236.0/22"
tempest_public_subnet_allocation_pools: "172.29.239.110-172.29.239.200"
{% else %}
tempest_public_subnet_cidr: 172.29.248.0/22
tempest_public_subnet_allocation_pools: "172.29.249.110-172.29.249.200"
{% endif %}
## Galera settings
galera_innodb_buffer_pool_size: 16M

View File

@ -19,7 +19,7 @@ neutron_lbaas_octavia: True
octavia_amp_image_file_name: {{ bootstrap_host_octavia_tmp }}/amphora-x64-haproxy.qcow2
octavia_amp_image_upload_enabled: True
octavia_glance_image_tag:
octavia_management_net_subnet_cidr: 172.29.252.0/22
octavia_management_net_subnet_cidr: "{{ (container_tech == 'nspawn') | ternary('172.29.240.0/22', '172.29.252.0/22') }}"
# make glance only use file
glance_default_store: file

View File

@ -17,6 +17,13 @@
trove_provider_net_name: flat-db
trove_service_net_phys_net: flat-db
trove_service_net_setup: True
{% if container_tech == 'nspawn' %}
trove_service_net_subnet_cidr: "172.29.236.0/22"
trove_service_net_allocation_pool_start: "172.29.237.110"
trove_service_net_allocation_pool_end: "172.29.237.200"
{% else %}
trove_service_net_subnet_cidr: "172.29.232.0/22"
trove_service_net_allocation_pool_start: "172.29.233.110"
trove_service_net_allocation_pool_end: "172.29.233.200"
{% endif %}