# Ansible tasks: build and boot the undercloud VM on the libvirt virthost.
---
# We're going to try putting files in `local_working_dir`, so make
# sure it exists first.
- name: Ensure local working dir exists
  delegate_to: localhost
  file:
    path: "{{ local_working_dir }}"
    state: directory

# Generate MAC addresses for the undercloud node.
- name: get MACs for the undercloud
  generate_macs:
    nodes:
      - "{{ undercloud_node }}"
    networks: "{{ networks }}"
  register: undercloud_mac_map

# Check if the undercloud volume exists. If not, we call out to
# [fetch_image.yml](fetch_image.yml.html) to download the image.
# A non-zero exit simply means "volume missing", so errors are ignored
# and the result is inspected later via `undercloud_vol_check is failed`.
- name: Check if undercloud volume exists
  command: >
    virsh vol-info --pool '{{ libvirt_volume_pool }}'
    '{{ undercloud_node.name }}.qcow2'
  ignore_errors: true
  changed_when: false
  register: undercloud_vol_check
  environment:
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
# Everything in this block only runs when the undercloud volume does
# not already exist in the libvirt storage pool.
- when: undercloud_vol_check is failed
  environment:
    LIBGUESTFS_BACKEND: direct
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
  block:
    # Conditionally include a playbook for all the images specified
    # in options that downloads, cache and extract if tar archived
    # only if the images aren't already in volume pool
    - name: Fetch the images
      include_role:
        name: fetch-images

    # Conditionally include a playbook for all the images specified
    # in options that updates images with the repos provided via the
    # release config.
    - include_tasks: inject_repos.yml
      when: update_images|bool or devmode|bool

    # inject the gating repo generated by ansible-role-tripleo-gate
    - include_tasks: inject_gating_repo.yml
      when: compressed_gating_repo is defined and compressed_gating_repo

    # Converts an overcloud-full.qcow2 into a undercloud.qcow2
    - include_tasks: convert_image.yml
      when: overcloud_as_undercloud|bool or baseos_as_undercloud|bool

    # Update images after we have converted the overcloud-full to an
    # undercloud image when using devmode. This also clones tripleo-ci
    # on the undercloud image.
    - include_tasks: update_image.yml
      when: devmode|bool

    # Inject updated overcloud and ipa images into our converted undercloud
    # image.
    # NOTE: `inject_images` is a list, so default to [] (not '') and
    # guard the `length` test with the same default so the task does not
    # error out when the variable is undefined.
    - name: Inject additional images
      command: >
        virt-customize -a {{ working_dir }}/undercloud.qcow2
        --upload {{ working_dir }}/{{ item }}:/home/{{ undercloud_user }}/{{ item }}
        --run-command 'chown {{ undercloud_user }}:{{ undercloud_user }} /home/{{ undercloud_user }}/{{ item }}'
      changed_when: true
      with_items: "{{ inject_images | default([]) }}"
      when:
        - overcloud_as_undercloud|bool or use_external_images|bool
        - inject_images | default([]) | length > 0
# This copies the `instackenv.json` configuration file that we
# generated in the overcloud setup role to the undercloud host.
- name: Copy instackenv.json to appliance
  command: >
    virt-customize -a {{ working_dir }}/undercloud.qcow2
    --upload {{ working_dir }}/instackenv.json:/home/{{ undercloud_user }}/instackenv.json
    --run-command 'chown {{ undercloud_user }}:{{ undercloud_user }} /home/{{ undercloud_user }}/instackenv.json'
  when: inject_instackenv|bool

# Copy the undercloud public key to the virthost, because we're going
# to inject it into the undercloud image in the next task.
- name: Copy undercloud ssh public key to working dir
  copy:
    src: "{{ undercloud_key }}.pub"
    dest: "{{ working_dir }}/id_rsa_undercloud.pub"

# Copy the virt host private key to `$HOME/.ssh/id_rsa_virt_power` for
# VirtualBMC be able to access the hypervisor where the VMs are located
- name: Copy virt host ssh private key to working dir
  when: release not in ['newton']
  copy:
    src: "{{ virt_power_key }}"
    dest: "{{ working_dir }}/id_rsa_virt_power"

# When using qemu:///system, the vbmc will need to ssh back to the virthost
# as the root user to perform power operations
- name: Add virt power key to root authorized keys if using qemu:///system
  authorized_key:
    user: root
    key: "{{ lookup('file', virt_power_key|quote + '.pub')|default('') }}"
  when: libvirt_uri == "qemu:///system"
  become: true

# Copy the public key to `$HOME/.ssh/authorized_keys` for the `root`
# and `undercloud_user` user on the undercloud.
- name: Inject undercloud ssh public key to appliance
  command: >
    virt-customize -a {{ working_dir }}/undercloud.qcow2
    --mkdir {{ item.homedir }}/.ssh/
    --upload '{{ working_dir }}/id_rsa_undercloud.pub:{{ item.homedir }}/.ssh/authorized_keys'
    --run-command 'chown -R {{ item.owner }}:{{ item.group }} {{ item.homedir }}/.ssh'
    --run-command 'chmod 0700 {{ item.homedir }}/.ssh'
    --run-command 'chmod 0600 {{ item.homedir }}/.ssh/authorized_keys'
  with_items:
    - homedir: /root
      owner: root
      group: root
    - homedir: '/home/{{ undercloud_user }}'
      owner: '{{ undercloud_user }}'
      group: '{{ undercloud_user }}'

# This copies the `id_rsa_virt_power` private key that we generated
# in the overcloud setup role to the undercloud host to be used by
# VirtualBMC+libvirt to access the virthost.
- name: Copy id_rsa_virt_power to appliance
  when: release not in ['newton']
  command: >
    virt-customize -a {{ working_dir }}/undercloud.qcow2
    --upload '{{ working_dir }}/id_rsa_virt_power:/root/.ssh/id_rsa_virt_power'
    --run-command 'chown root:root /root/.ssh/id_rsa_virt_power'
    --run-command 'chmod 0600 /root/.ssh/id_rsa_virt_power'
# Render the optional user-supplied customization script into the
# working directory so a later task can run it inside the image.
- name: Create undercloud customize script
  template:
    src: "{{ undercloud_customize_script }}"
    dest: "{{ working_dir }}/undercloud-customize.sh"
    # Quoted so YAML does not interpret the octal mode as an integer
    mode: "0755"
  when: undercloud_customize_script is defined
# This allows to run a customization script on the
# undercloud image, to cover any extra needs.
- name: Perform extra undercloud customizations
  command: >
    virt-customize -a {{ working_dir }}/undercloud.qcow2
    --run '{{ working_dir }}/undercloud-customize.sh'
  when: undercloud_customize_script is defined

# This allows to run a customization script on the
# overcloud image, to cover any extra needs.
- name: Perform extra overcloud customizations
  include_tasks: customize_overcloud.yml
  when: overcloud_customize_script is defined

# Perform an SELinux relabel on the undercloud image to avoid problems
# caused by bad labelling, since by default the undercloud runs in
# enforcing mode.
- name: Perform selinux relabel on undercloud image
  command: >
    virt-customize -a {{ working_dir }}/undercloud.qcow2
    --selinux-relabel

# NOTE(trown) Nested blocks do not seem to work as expected so instead using
# conditionals with AND to simulate the same thing.
# Resize the undercloud image if it was not converted from an overcloud
# image
- when:
    - undercloud_vol_check is failed
    - not overcloud_as_undercloud|bool
  block:
    - name: >
        Determine if the undercloud image is a whole disk image
        so we can resize it appropriately
      command: >
        virt-filesystems -a {{ working_dir }}/undercloud.qcow2
      environment:
        LIBGUESTFS_BACKEND: direct
      register: undercloud_partitions
# Resize path for a whole-disk image (root filesystem on /dev/sda1):
# create a larger target image, virt-resize into it, rename it back,
# then grow the filesystem inside the guest.
- when:
    - undercloud_vol_check is failed
    - not overcloud_as_undercloud|bool
    - undercloud_partitions.stdout=='/dev/sda1'
  block:
    # Handle the resize for the whole disk image case
    - name: Resize undercloud image (create target image)
      command: >
        qemu-img create -f qcow2 -o preallocation=off
        '{{ working_dir }}/undercloud-resized.qcow2'
        '{{ flavors[undercloud_node.flavor].disk }}G'

    - name: Resize undercloud image (call virt-resize)
      command: >
        virt-resize --expand /dev/sda1
        '{{ working_dir }}/undercloud.qcow2'
        '{{ working_dir }}/undercloud-resized.qcow2'
      environment:
        LIBGUESTFS_BACKEND: direct
        LIBGUESTFS_DEBUG: 1
        LIBGUESTFS_TRACE: 1

    - name: Rename resized image to original name
      command: >
        mv -f '{{ working_dir }}/undercloud-resized.qcow2'
        '{{ working_dir }}/undercloud.qcow2'

    # Grow the root filesystem to fill the resized partition. The error
    # message previously referenced the undefined $FSTYPE; the shell
    # variable is FS_TYPE.
    - name: Resize undercloud image (expand the FS)
      command: >
        virt-customize -a '{{ working_dir }}/undercloud.qcow2'
        --run-command 'FS_TYPE=`findmnt -o FSTYPE -fn /`;
        if [ "$FS_TYPE" = "xfs" ]; then xfs_growfs /;
        elif [ "$FS_TYPE" = "ext4" ]; then resize2fs /dev/sda1;
        else echo "ERROR: Unknown filesystem $FS_TYPE, cannot resize.";
        exit 1; fi'
      environment:
        LIBGUESTFS_BACKEND: direct
        LIBGUESTFS_DEBUG: 1
        LIBGUESTFS_TRACE: 1
# Resize path for a partition image (root filesystem directly on
# /dev/sda): grow the qcow2 in place, then expand the filesystem.
- when:
    - undercloud_vol_check is failed
    - not overcloud_as_undercloud|bool
    - undercloud_partitions.stdout=='/dev/sda'
  block:
    # Handle the resize for the partition image case
    - name: Resize undercloud image (expand the image)
      command: >
        qemu-img resize
        '{{ working_dir }}/undercloud.qcow2'
        '{{ flavors[undercloud_node.flavor].disk }}G'

    # Grow the root filesystem to fill the resized image. The error
    # message previously referenced the undefined $FSTYPE; the shell
    # variable is FS_TYPE.
    - name: Resize undercloud image (expand the FS)
      command: >
        virt-customize -a '{{ working_dir }}/undercloud.qcow2'
        --run-command 'FS_TYPE=`findmnt -o FSTYPE -fn /`;
        if [ "$FS_TYPE" = "xfs" ]; then xfs_growfs /;
        elif [ "$FS_TYPE" = "ext4" ]; then resize2fs /dev/sda;
        else echo "ERROR: Unknown filesystem $FS_TYPE, cannot resize.";
        exit 1; fi'
      environment:
        LIBGUESTFS_BACKEND: direct
        LIBGUESTFS_DEBUG: 1
        LIBGUESTFS_TRACE: 1
# Choose the environment variables used by subsequent libvirt/libguestfs
# commands depending on whether tasks run as root.
- name: Set libvirt environment when using root to run tasks
  set_fact:
    libvirt_environment:
      LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
      LIBGUESTFS_BACKEND: "direct"
    cacheable: true
  when: ssh_user == "root"

- name: Set libvirt environment when not using root to run tasks
  set_fact:
    libvirt_environment:
      LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
    cacheable: true
  when: ssh_user != "root"

# NOTE(trown) We use the overcloud-full initramfs and kernel as DIB
# seems a bit smarter about extracting them than virt-get-kernel and
# the partition image is simply a converted overcloud-full
- name: Extract the kernel and initramfs from the undercloud image
  command: >
    virt-copy-out -a '{{ working_dir }}/undercloud.qcow2'
    '/home/{{ undercloud_user }}/overcloud-full.vmlinuz'
    '/home/{{ undercloud_user }}/overcloud-full.initrd'
    '{{ working_dir }}'
  environment: "{{ libvirt_environment }}"
  when: not undercloud_use_custom_boot_images|bool

- when:
    - not undercloud_use_custom_boot_images|bool
    - not overcloud_as_undercloud|bool
  block:
    # NOTE(ykarel) This is required to get the undercloud specific
    # kernel when not using overcloud_as_undercloud.
    - name: Extract the kernel and initramfs from the undercloud image
      command: >
        virt-get-kernel -a '{{ working_dir }}/undercloud.qcow2' --unversioned-names
        --output '{{ working_dir }}'
      environment:
        LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"

    # NOTE(trown) The undercloudvm template expects this to be
    # named overcloud-full.vmlinuz. We can update the devmode case
    # to not require this step
    - name: rename undercloud kernel
      command: >
        mv '{{ working_dir }}/vmlinuz'
        '{{ working_dir }}/overcloud-full.vmlinuz'

    # NOTE(trown) The undercloudvm template expects this to be
    # named overcloud-full.initrd. We can update the devmode case
    # to not require this step
    - name: rename undercloud initramfs
      command: >
        mv '{{ working_dir }}/initramfs'
        '{{ working_dir }}/overcloud-full.initrd'

    # NOTE(trown): This is a bit of a hack to get the undercloud vm
    # template to use the external kernel and initrd. We should
    # instead use a different var for this and set it in the devmode
    # case as well.
    - name: Set overcloud_as_undercloud to true
      set_fact:
        overcloud_as_undercloud: true
        cacheable: true
# Only create and populate the libvirt volume when it did not already
# exist at the start of this play.
- when: undercloud_vol_check is failed
  environment:
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
  block:
    # Create a libvirt volume and upload the undercloud image to
    # libvirt.
    - name: Create undercloud volume
      command: >
        virsh vol-create-as {{ libvirt_volume_pool }}
        {{ undercloud_node.name }}.qcow2
        {{ flavors[undercloud_node.flavor].disk }}G --format qcow2

    # The upload can take a while, so run it async with polling rather
    # than holding the connection open.
    - name: Upload undercloud volume to storage pool
      command: >
        virsh -k 0 vol-upload --pool '{{ libvirt_volume_pool }}'
        '{{ undercloud_node.name }}.qcow2'
        '{{ working_dir }}/undercloud.qcow2'
      async: 600
      poll: 10

# Define (but do not start) the undercloud virtual machine.
- name: Define undercloud vm
  virt:
    name: "{{ undercloud_node.name }}"
    command: define
    xml: "{{ lookup('template', 'undercloudvm.xml.j2') }}"
    uri: "{{ libvirt_uri }}"

# Make sure we can read the image file after the copy
- name: Ensure file permissions if root used as task runner
  file:
    path: "{{ working_dir }}"
    owner: "{{ non_root_user }}"
    group: "{{ non_root_user }}"
    mode: "a+x"
    recurse: true
    state: 'directory'
  when: non_root_chown|bool

# Start the undercloud virtual machine and mark it to autostart.
- name: Start undercloud vm
  virt:
    name: "{{ undercloud_node.name }}"
    command: start
    autostart: true
    state: running
    uri: "{{ libvirt_uri }}"
# Get the ip address of the undercloud. This will retry several times
# (`undercloud_ip_retries`) until the undercloud is ready. The script
# works by getting the MAC address of the first undercloud interface,
# and then looking that up in the kernel ARP table.
- name: Get undercloud vm ip address
  script: "get-undercloud-ip.sh {{ undercloud_node.name }}"
  register: undercloud_vm_ip_result
  until: undercloud_vm_ip_result is success
  retries: "{{ undercloud_ip_retries }}"
  delay: 10
  environment:
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"

- name: Set_fact for undercloud ip
  set_fact:
    undercloud_ip: "{{ undercloud_vm_ip_result.stdout_lines[0] }}"
    cacheable: true

- name: Wait until ssh is available on undercloud node
  wait_for:
    host: "{{ undercloud_ip }}"
    state: started
    port: 22
    timeout: 600

# Add the undercloud to the in-memory inventory.
- name: Add undercloud vm to inventory
  add_host:
    name: undercloud
    groups: undercloud
    ansible_host: undercloud
    ansible_fqdn: undercloud
    ansible_user: '{{ undercloud_user }}'
    ansible_private_key_file: "{{ undercloud_key }}"
    ansible_ssh_extra_args: '-F "{{ local_working_dir }}/ssh.config.ansible"'
    undercloud_ip: "{{ undercloud_ip }}"

# Render the ssh config used by the extra args above so later plays can
# reach the undercloud through the virthost.
- name: Generate ssh configuration
  delegate_to: localhost
  template:
    src: ssh.config.j2
    dest: "{{ local_working_dir }}/ssh.config.ansible"
# Open the TripleO UI / API ports on the virthost and run a persistent
# ssh tunnel service, only when UI port forwarding is enabled.
- when: enable_port_forward_for_tripleo_ui|bool
  block:

    # TO-DO weshayutin
    # In the upcoming release of ansible 2.4 this should be moved to
    # iptables_raw
    # - name: ensure the required tcp ports are open on the virthost
    - name: configure iptables
      iptables:
        table: filter
        chain: INPUT
        action: insert
        protocol: tcp
        match: tcp
        ctstate: NEW
        jump: ACCEPT
        destination_port: "{{ item }}"
      become: true
      with_items:
        - 6385
        - 5000
        - 5050
        - 8004
        - 8080
        - 9000
        - 8989
        - 8774
        - 3000
        - 8181
        - 8443
        - 443

    - name: Create ssh tunnel systemd service
      template:
        src: "{{ ssh_tunnel_service_file }}"
        dest: "/etc/systemd/system/ssh-tunnel.service"
        # Quoted so YAML does not interpret the octal mode as an integer
        mode: "0644"
      become: true

    - name: reload the systemctl daemon after file update
      shell: systemctl daemon-reload
      become: true
      tags:
        - skip_ansible_lint

    - name: Enable ssh tunnel service
      service:
        name: ssh-tunnel
        enabled: true
        state: restarted
      become: true