Use networkd role to set up test networks

The test networks were being set up using networkd and a custom template;
this change updates the process so we're dogfooding.

Additionally change the container config drop so that it executes a
container restart when needed and in the right order.

Several tests were removed because they are basic operations which are
already being tested in the nspawn_hosts role.

Change-Id: I50799d53f8d75f043ed12d4e50da3d55a4ee159a
Signed-off-by: Kevin Carter <kevin.carter@rackspace.com>
This commit is contained in:
Kevin Carter 2018-05-18 18:24:04 -05:00
parent ee28a70aee
commit e48838fef3
No known key found for this signature in database
GPG Key ID: 9443251A787B9FB3
10 changed files with 184 additions and 303 deletions

View File

@ -12,20 +12,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: Gather facts for new container(s)
setup:
gather_subset: "!all"
listen: check connection
- name: Restart new container (first boot only)
systemd:
name: "systemd-nspawn@{{ systemd_escape.stdout }}"
state: "restarted"
enabled: true
register: machinectl_first_boot
retries: 5
delay: 2
until: machinectl_first_boot | success
delegate_to: "{{ physical_host }}"
listen: Container first boot

View File

@ -89,8 +89,6 @@
path: "/etc/systemd/nspawn/{{ inventory_hostname }}.nspawn"
register: nspawn_config_exists
delegate_to: "{{ physical_host }}"
when:
- nspawn_systemd_version | int > 219
- name: Slurp existing nspawn config
slurp:
@ -103,6 +101,68 @@
- nspawn_systemd_version | int > 219
- nspawn_config_exists.stat.exists | bool
# Check for the existence of an nspawn configuration file. If found slurp it up
# and use it as the base nspawn config file with the option to config template
# override.
- name: Copy container config (existing)
config_template:
content: "{{ nspawn_config.content | b64decode }}"
dest: "/etc/systemd/nspawn/{{ inventory_hostname }}.nspawn"
owner: "root"
group: "root"
mode: "0644"
config_overrides: "{{ container_config_overrides | default({}) }}"
config_type: "ini"
register: container_config_existing
delegate_to: "{{ physical_host }}"
when:
- nspawn_container_preserve_config | bool
- nspawn_config_exists.stat.exists | bool
- nspawn_systemd_version | int > 219
# If no nspawn configuration file exists, create a new config file using the
# default template.
- name: Copy container config (new)
config_template:
src: templates/container_config.nspawn.j2
dest: "/etc/systemd/nspawn/{{ inventory_hostname }}.nspawn"
owner: "root"
group: "root"
mode: "0644"
config_overrides: "{{ container_config_overrides | default({}) }}"
config_type: "ini"
register: container_config_new
delegate_to: "{{ physical_host }}"
when:
- not nspawn_container_preserve_config | bool or
not nspawn_config_exists.stat.exists | bool
- nspawn_systemd_version | int > 219
- name: Copy container config (old)
template:
src: templates/container_config_old.nspawn.j2
dest: "/etc/systemd/system/systemd-nspawn@{{ systemd_escape.stdout }}.service"
owner: "root"
group: "root"
mode: "0644"
register: container_config_old
delegate_to: "{{ physical_host }}"
when:
- nspawn_systemd_version | int < 220
- name: Notice existing container config changed
debug:
msg: >-
The existing container config has changed. While this change has been
noticed, no restart has been performed. By not restarting the container
the system is preserving the state of the environment by not causing
unexpected downtime. For these changes to go into effect the container
must be restarted. This can be done using the following command,
`machinectl [poweroff|start] {{ inventory_hostname }}`. To disable the
configuration saving set `nspawn_container_preserve_config` to false.
when:
- container_config_existing is changed
- name: Clone the base container image
command: machinectl clone "{{ container_image }}" "{{ inventory_hostname }}"
when:
@ -112,8 +172,6 @@
delay: 2
until: machinectl_container_clone is success
delegate_to: "{{ physical_host }}"
notify:
- Container first boot
tags:
- skip_ansible_lint
@ -144,6 +202,7 @@
- "/openstack/{{ inventory_hostname }}"
- "/openstack/backup/{{ inventory_hostname }}"
- "/openstack/log/{{ inventory_hostname }}"
- "/var/lib/machines/{{ inventory_hostname }}/var/log/journal"
- "/var/lib/machines/{{ inventory_hostname }}/etc/systemd/network"
- "/var/lib/machines/{{ inventory_hostname }}/etc/systemd/nspawn"
- "/var/lib/machines/{{ inventory_hostname }}/var/lib/dbus"
@ -172,59 +231,6 @@
with_items: "{{ container_default_bind_mounts | union(container_bind_mounts | default([])) }}"
delegate_to: "{{ physical_host }}"
# Check for the existence of an nspawn configuration file. If found slurp it up
# and use it as the base nspawn config file with the option to config template
# override.
- name: modern systemd block
block:
- name: Copy container config (new)
config_template:
content: "{{ nspawn_config.content | b64decode }}"
dest: "/etc/systemd/nspawn/{{ inventory_hostname }}.nspawn"
owner: "root"
group: "root"
mode: "0644"
config_overrides: "{{ container_config_overrides | default({}) }}"
config_type: "ini"
delegate_to: "{{ physical_host }}"
when:
- nspawn_container_preserve_config | bool
- nspawn_config_exists.stat.exists | bool
# If no nspawn configuration file exists, create a new config file using the
# default template.
- name: Copy container config (new)
config_template:
src: templates/container_config.nspawn.j2
dest: "/etc/systemd/nspawn/{{ inventory_hostname }}.nspawn"
owner: "root"
group: "root"
mode: "0644"
config_overrides: "{{ container_config_overrides | default({}) }}"
config_type: "ini"
delegate_to: "{{ physical_host }}"
when:
- not nspawn_container_preserve_config | bool or
not nspawn_config_exists.stat.exists | bool
notify:
- Container first boot
when:
- nspawn_systemd_version | int > 219
- name: Copy container config (old)
template:
src: templates/container_config_old.nspawn.j2
dest: "/etc/systemd/system/systemd-nspawn@{{ systemd_escape.stdout }}.service"
owner: "root"
group: "root"
mode: "0644"
register: container_config_old
delegate_to: "{{ physical_host }}"
when:
- nspawn_systemd_version | int < 220
notify:
- Container first boot
- name: Generate machine-id
command: "systemd-machine-id-setup --root=/var/lib/machines/{{ inventory_hostname }}"
args:
@ -246,19 +252,57 @@
remote_user: root
delegate_to: "{{ physical_host }}"
- name: Create resolved link
file:
src: "/var/run/systemd/resolve/resolv.conf"
dest: "/var/lib/machines/{{ inventory_hostname }}/etc/resolv.conf"
force: true
state: link
delegate_to: "{{ physical_host }}"
when:
- nspawn_container_enable_resolved | bool
# Some distros do not have access to systemd-resolved. If the option
# `nspawn_container_enable_resolved` is disabled this will ensure functionality
# in the absence of modern systemd.
- name: Legacy resolvers
block:
- name: Check resolv.conf
stat:
path: "/var/lib/machines/{{ inventory_hostname }}/etc/resolv.conf"
delegate_to: "{{ physical_host }}"
register: nspawn_resolv_conf
- name: Remove resolv.conf link
file:
path: "/var/lib/machines/{{ inventory_hostname }}/etc/resolv.conf"
state: absent
delegate_to: "{{ physical_host }}"
when:
- nspawn_resolv_conf.stat.islnk is defined and
nspawn_resolv_conf.stat.islnk
- name: Place resolv.conf
copy:
content: |
nameserver {{ hostvars[physical_host]['ansible_mv_' + nspawn_networks['nspawn_address']['bridge']]['ipv4']['address'] }}
search {{ container_domain }}
dest: "/var/lib/machines/{{ inventory_hostname }}/etc/resolv.conf"
delegate_to: "{{ physical_host }}"
when:
- not nspawn_container_enable_resolved | bool
- name: Start new container (enable)
systemd:
daemon_reload: yes
name: "systemd-nspawn@{{ systemd_escape.stdout }}"
state: "{{ (container_config_old | changed | default(false)) | ternary('restarted', 'started') }}"
state: "{{ ((machinectl_container_clone is changed or container_config_new is changed or container_config_old is changed) | default(false)) | ternary('restarted', 'started') }}"
enabled: "{{ (nspawn_systemd_version | int > 219) | ternary('true', 'false') }}"
register: machinectl_start
retries: 5
delay: 2
until: machinectl_start is success
delegate_to: "{{ physical_host }}"
notify:
- check connection
- name: Generate hostname
command: >-
@ -279,11 +323,6 @@
# This point the container is running. Delegation should no longer be required.
# ==============================================================================
- name: Create journal directory
file:
path: "/var/log/journal"
state: "directory"
- name: Run the systemd-networkd role
include_role:
name: systemd_networkd
@ -323,42 +362,6 @@
tags:
- network-config
- name: Create resolved link
file:
src: "/var/run/systemd/resolve/resolv.conf"
dest: "/etc/resolv.conf"
force: true
state: link
when:
- nspawn_container_enable_resolved | bool
# Some distros do not have access to systemd-resolved. If the option
# `nspawn_container_enable_resolved` is disabled this will ensure functionality
# in the absence of modern systemd.
- name: Legacy resolvers
block:
- name: Check resolv.conf
stat:
path: "/etc/resolv.conf"
register: nspawn_resolv_conf
- name: Remove resolv.conf link
file:
path: "/etc/resolv.conf"
state: absent
when:
- nspawn_resolv_conf.stat.islnk is defined and
nspawn_resolv_conf.stat.islnk
- name: Place resolv.conf
copy:
content: |
nameserver {{ hostvars[physical_host]['ansible_mv_' + nspawn_networks['nspawn_address']['bridge']]['ipv4']['address'] }}
search {{ container_domain }}
dest: "/etc/resolv.conf"
when:
- not nspawn_container_enable_resolved | bool
- name: Force all notified handlers now
meta: flush_handlers

View File

@ -13,16 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
container_networks:
management_address:
address: "{{ ansible_host | default('localhost') }}"
netmask: "255.255.255.0"
bridge: "br-mgmt"
static_routes:
- cidr: 172.29.100.0/24
gateway: 172.29.100.100
bridges:
- name: "br-mgmt"
ip_addr: "172.29.100.100"
netmask: "255.255.255.0"
bridges: []

View File

@ -6,3 +6,9 @@ ansible_user: root
physical_host: localhost
container_tech: nspawn
container_networks:
management_address:
address: "{{ ansible_host }}"
netmask: "255.255.255.0"
bridge: "br-mgmt"

View File

@ -6,3 +6,9 @@ ansible_user: root
physical_host: localhost
container_tech: nspawn
container_networks:
management_address:
address: "{{ ansible_host }}"
netmask: "255.255.255.0"
bridge: "br-mgmt"

View File

@ -6,3 +6,9 @@ ansible_user: root
physical_host: localhost
container_tech: nspawn
container_networks:
management_address:
address: "{{ ansible_host }}"
netmask: "255.255.255.0"
bridge: "br-mgmt"

View File

@ -19,3 +19,9 @@ bridges:
ansible_python_interpreter: "/usr/bin/python2"
physical_host: localhost
container_networks:
management_address:
address: "172.29.236.100"
netmask: "255.255.255.0"
bridge: "br-mgmt"

View File

@ -1,41 +0,0 @@
# {{ ansible_managed }}
[Unit]
Description=test networks service
After=syslog.target
After=network.target
[Service]
Type=oneshot
User=root
RemainAfterExit=yes
{% set seen_start_interfaces = [] %}
{% for item in bridges %}
{% if item is not mapping %}
{% set item = {'name': item} %}
{% endif %}
{% if item.name not in seen_start_interfaces %}
{% set _ = seen_start_interfaces.append(item.name) %}
# Interface [{{ item.name }}]
ExecStart=-/sbin/ip link add dev "{{ item.name }}" type bridge
ExecStart=-/sbin/ip link set dev "{{ item.name }}" up
{% if item.address is defined and item.netmask is defined %}
ExecStart=-/sbin/ip address add "{{ item.address }}/{{ item.netmask }}" dev "{{ item.bridge }}"
{% endif %}
{% if item.veth_peer is defined %}
ExecStart=-/sbin/ip link add "{{ item.name }}-veth" type veth peer name "{{ item.veth_peer }}"
ExecStart=-/sbin/ip link set "{{ item.name }}-veth" up
ExecStart=-/sbin/ip link set "{{ item.veth_peer }}-veth" up
ExecStart=-/sbin/ip link set dev "{{ item.name }}-veth" master "{{ item.name }}"
ExecStop=-/sbin/ip link delete dev "{{ item.veth_peer }}-veth"
ExecStop=-/sbin/ip link delete dev "{{ item.name }}-veth"
{% endif %}
{% endif %}
ExecStop=-/sbin/ip link delete dev "{{ item.name }}"
{% endfor %}
[Install]
WantedBy=multi-user.target

View File

@ -18,11 +18,11 @@
connection: local
become: true
tasks:
- name: Test connectivity to external address
command: ping -c 1 git.openstack.org
- name: Test connectivity to container gateway
command: ping -c 1 10.0.4.1
register: machinectl_ping
retries: 3
delay: 2
delay: 5
until: machinectl_ping is success
delegate_to: "{{ item }}"
with_items: "{{ groups['all_containers'] }}"
@ -33,122 +33,9 @@
command: ping -c 1 172.29.100.101
register: machinectl_ping
retries: 3
delay: 2
delay: 5
until: machinectl_ping is success
delegate_to: "{{ item }}"
with_items: "{{ groups['all_containers'] }}"
tags:
- skip_ansible_lint
- name: Stop container3
command: "machinectl poweroff container3"
register: container_stop
changed_when: container_stop.rc == 0
failed_when: not container_stop.rc in [0, 2]
until: container_stop.rc in [0, 2]
retries: 3
delay: 2
tags:
- skip_ansible_lint
- name: Start container3
command: "machinectl start container3"
register: container_start
changed_when: container_start.rc == 0
until: container_start is success
retries: 3
delay: 2
tags:
- skip_ansible_lint
- name: Status check container1
command: "machinectl status container1"
register: container_status
changed_when: container_status.rc == 0
until: container_status is success
retries: 3
delay: 2
tags:
- skip_ansible_lint
- name: Clone container2
command: "machinectl clone container2 test1-container2"
register: container_clone
changed_when: container_clone.rc == 0
until: container_clone is success
retries: 3
delay: 2
tags:
- skip_ansible_lint
- name: Show image test1-container2
command: "machinectl show-image test1-container2"
register: container_show
changed_when: container_show.rc == 0
until: container_show is success
retries: 3
delay: 2
tags:
- skip_ansible_lint
- name: Rename image test1-container2
command: "machinectl rename test1-container2 test1"
register: container_rename
changed_when: container_rename.rc == 0
until: container_rename is success
retries: 3
delay: 2
tags:
- skip_ansible_lint
- name: Check Status of test1 image
command: "machinectl image-status test1"
register: container_status
changed_when: container_status.rc == 0
until: container_status is success
retries: 3
delay: 2
tags:
- skip_ansible_lint
- name: Start test1 container
command: "machinectl start test1"
register: container_start
changed_when: container_start.rc == 0
until: container_start is success
retries: 3
delay: 2
tags:
- skip_ansible_lint
- name: Status test1 container
command: "machinectl status test1"
register: container_status
changed_when: container_status.rc == 0
until: container_status is success
retries: 3
delay: 2
tags:
- skip_ansible_lint
- name: Poweroff test1 container
command: "machinectl poweroff test1"
register: container_poweroff
changed_when: container_poweroff.rc == 0
until: container_poweroff is success
retries: 3
delay: 2
tags:
- skip_ansible_lint
- name: Remove test container images
command: "machinectl remove {{ item }}"
with_items:
- test1
register: container_poweroff
changed_when: container_poweroff.rc == 0
until: container_poweroff is success
retries: 3
delay: 2
tags:
- skip_ansible_lint

View File

@ -26,18 +26,6 @@
debug:
var: hostvars
- name: Create test network service
template:
src: "templates/test-networks.service.j2"
dest: "/etc/systemd/system/00-test-networks.service"
- name: Enable test network service
systemd:
name: "00-test-networks.service"
state: "started"
enabled: true
daemon_reload: true
- name: Ensure root ssh key
user:
name: "{{ ansible_user_id | default('root') }}"
@ -72,8 +60,6 @@
- "iptables -P INPUT ACCEPT"
- "iptables -P FORWARD ACCEPT"
- "iptables -P OUTPUT ACCEPT"
- "iptables -A POSTROUTING -t mangle -p tcp -j CHECKSUM --checksum-fill"
- "iptables -t nat -A POSTROUTING -o {{ ansible_default_ipv4.interface }} -j MASQUERADE"
tags:
- skip_ansible_lint
@ -88,6 +74,57 @@
- "91.189.91.21 images.linuxcontainers.org us.images.linuxcontainers.org"
- "91.189.88.37 images.linuxcontainers.org uk.images.linuxcontainers.org"
- name: Run the systemd-networkd role
include_role:
name: systemd_networkd
private: true
vars:
systemd_interface_cleanup: true
systemd_run_networkd: true
systemd_netdevs:
- NetDev:
Name: dummy0
Kind: dummy
- NetDev:
Name: br-mgmt
Kind: bridge
systemd_networks:
- interface: "dummy0"
bridge: "br-mgmt"
- interface: "br-mgmt"
address: "172.29.236.100"
netmask: "255.255.255.0"
tags:
- network-config
- name: Run the systemd service role
include_role:
name: systemd_service
private: true
vars:
systemd_services:
- service_name: "networking-post-up"
config_overrides:
Unit:
Description: networking-post-up
After: network-online.target
Wants: network-online.target
Service:
RemainAfterExit: yes
service_type: oneshot
execstarts:
- "-/sbin/iptables -A POSTROUTING -t mangle -p tcp --dport 22 -j CHECKSUM --checksum-fill"
- "-/sbin/iptables -t nat -A POSTROUTING -o {{ ansible_default_ipv4.interface }} -j MASQUERADE"
- "-/sbin/ethtool -K br-mgmt gso off sg off tso off tx off"
execstops:
- "/sbin/iptables -D POSTROUTING -t mangle -p tcp --dport 22 -j CHECKSUM --checksum-fill"
- "/sbin/iptables -t nat -D POSTROUTING -o {{ ansible_default_ipv4.interface }} -j MASQUERADE"
enabled: yes
state: started
systemd_tempd_prefix: openstack
tags:
- network-config
roles:
- role: "nspawn_hosts"