Merge "Add ability to provision "dumb" VMs for fake nodepool"

Zuul 2018-04-20 04:39:24 +00:00 committed by Gerrit Code Review
commit 0012d89898
8 changed files with 347 additions and 1 deletion

View File

@@ -0,0 +1,54 @@
---
- name: Destroy previous setup
  hosts: virthost
  gather_facts: yes
  vars:
    - libvirt_volume_path: /opt/vm_images
    - libvirt_volume_pool: oooq_pool
    - libvirt_uri: qemu:///system
    - working_dir: /tmp/
    - overcloud_nodes:
        - name: subnode-0
          flavor: control
        - name: subnode-1
          flavor: control
  roles:
    - libvirt/teardown/nodes
  become: true

- name: Setup undercloud and baremetal vms and networks in libvirt
  hosts: virthost
  gather_facts: yes
  vars:
    - pub_key: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa.pub"
    - libvirt_volume_path: /opt/vm_images
    - image_fetch_dir: "{{ local_working_dir }}"
    - fake_nodepool_vms: true
    - create_instackenv_json: false
    - libvirt_uri: qemu:///system
    - vm_pass: random
    - control_vcpu: 6
    - control_memory: 16384
    - overcloud_nodes:
        - name: subnode-0
          flavor: control
        - name: subnode-1
          flavor: control
    - images:
        - name: centos
          url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1701.qcow2
          type: qcow2
          md5sum: "bc62ad193b085680952edfa7face0f23 *CentOS-7-x86_64-GenericCloud-1701.qcow2"
  roles:
    - libvirt/setup/overcloud
  become: true

- name: Add nodes to the generated inventory
  hosts: localhost
  gather_facts: yes
  roles:
    - tripleo-inventory

View File

@@ -2,6 +2,11 @@
# images for the undercloud and overcloud.
# Note: the virt_pool module is not working properly on rhel-7.2
# https://bugs.launchpad.net/tripleo-quickstart/+bug/1597905
- name: ensure libvirt volume path exists
  file:
    path: "{{ libvirt_volume_path }}"
    state: directory

- name: Check volume pool
  command: >
    virsh pool-uuid "{{ libvirt_volume_pool }}"

View File

@@ -0,0 +1,26 @@
#!/bin/sh

# This script will attempt to get the IP address of a given libvirt guest.
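# Usage: get-undercloud-ip.sh <guest-name>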
set -eu

PATH=$PATH:/usr/sbin:/sbin

VMNAME=$1

# Get the MAC address of the first interface by looking for the
# `<mac address...` line. Yes, we're parsing XML with awk. It's probably
# safe (because the XML is coming from libvirt, so we can be reasonably
# confident that the formatting will remain the same).
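# e.g. a line like <mac address='52:54:00:0c:9e:12'/> yields 52:54:00:0c:9e:12
# (the MAC shown here is illustrative).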
mac=$(virsh dumpxml "$VMNAME" | awk -F "'" '/mac address/ { print $2; exit }')

# Look up the MAC address in the ARP table.
ip=$(ip neigh | grep "$mac" | awk '{print $1;}')

if [ -z "$ip" ]; then
    echo "ip address for $VMNAME is not available" >&2
    exit 1
fi

echo "$ip"

View File

@@ -1,3 +1,4 @@
dependencies:
  - libvirt
  - common
  - libvirt/setup/common

View File

@@ -0,0 +1,193 @@
---
- name: Fetch a CentOS image to use for fake nodepool nodes
  include_role:
    name: fetch-images

- name: Resize undercloud image (create target image)
  command: >
    qemu-img create -f qcow2 -o preallocation=off
    '{{ working_dir }}/undercloud-resized.qcow2'
    '80G'

- name: Resize undercloud image (call virt-resize)
  command: >
    virt-resize --expand /dev/sda1
    '{{ working_dir }}/undercloud.qcow2'
    '{{ working_dir }}/undercloud-resized.qcow2'
  environment:
    LIBGUESTFS_BACKEND: direct

- name: Rename resized image to original name
  command: >
    mv -f '{{ working_dir }}/undercloud-resized.qcow2'
    '{{ working_dir }}/undercloud.qcow2'

- name: Calculate password hash
  no_log: true
  shell: >
    import crypt; print crypt.crypt("{{ vm_pass }}", "$1$SecretSalt$")
  args:
    executable: /usr/bin/python
  register: hash
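# Note: the registered hash is not consumed by the tasks below; the
# virt-customize call that follows sets the root password with
# --root-password random instead.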
- name: Inject password into the image
  command: >
    virt-customize -a {{ working_dir }}/undercloud.qcow2
    --root-password random
  environment:
    LIBGUESTFS_BACKEND: direct
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
  register: root_password

- name: Resize the filesystem
  command: >
    virt-customize -a {{ working_dir }}/undercloud.qcow2
    --run-command 'xfs_growfs /'
  environment:
    LIBGUESTFS_BACKEND: direct
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"

- name: Inject ssh public key into the image
  command: >
    virt-customize -a {{ working_dir }}/undercloud.qcow2
    --mkdir /root/.ssh/
    --upload '{{ pub_key }}:/root/.ssh/authorized_keys'
    --run-command 'chown -R root:root /root/.ssh'
    --run-command 'chmod 0700 /root/.ssh'
    --run-command 'chmod 0600 /root/.ssh/authorized_keys'
    --selinux-relabel
  environment:
    LIBGUESTFS_BACKEND: direct
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"

- name: Add zuul user to the image
  command: >
    virt-customize -a {{ working_dir }}/undercloud.qcow2
    --run-command 'useradd zuul'
    --mkdir /home/zuul/.ssh
    --run-command 'cp /root/.ssh/authorized_keys /home/zuul/.ssh/'
    --run-command 'chown -R zuul:zuul /home/zuul/.ssh'
    --run-command 'chmod 0700 /home/zuul/.ssh'
    --run-command 'chmod 0600 /home/zuul/.ssh/authorized_keys'
    --run-command 'echo "zuul ALL=(root) NOPASSWD:ALL" > /etc/sudoers.d/zuul'
    --run-command 'chmod 0440 /etc/sudoers.d/zuul'
    --selinux-relabel
  environment:
    LIBGUESTFS_BACKEND: direct
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"

- debug:
    msg: "Add basic packages we need to the image"

- environment:
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
  block:
    # Create libvirt volumes and upload them to libvirt.
    - name: Create fake nodepool volumes
      command: >
        virsh vol-create-as {{ libvirt_volume_pool }}
        {{ item.name }}.qcow2
        {{ flavors[item.flavor].disk }}G --format qcow2
      with_items: "{{ overcloud_nodes }}"
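    # Seed each volume with the customized undercloud.qcow2 image (uploaded
    # by the vol-upload task below), so every subnode boots the same image.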
    - name: Upload the volume to storage pool
      command: >
        virsh -k 0 vol-upload --pool '{{ libvirt_volume_pool }}'
        '{{ item.name }}.qcow2'
        '{{ local_working_dir }}/undercloud.qcow2'
      async: 600
      poll: 10
      with_items: "{{ overcloud_nodes }}"

    - name: Start fake nodepool nodes
      virt:
        name: "{{ item.name }}"
        command: start
        autostart: true
        state: running
        uri: "{{ libvirt_uri }}"
      with_items: "{{ overcloud_nodes }}"

    ## need to find a way to make these next tasks generic

    - name: Get subnode-0 IP address
      script: "get-undercloud-ip.sh subnode-0"
      register: "subnode_0_ip_result"
      until: "subnode_0_ip_result|success"
      retries: 20
      delay: 10

    - name: Set fact for subnode-0 IP
      set_fact:
        subnode_0_ip: "{{ subnode_0_ip_result.stdout_lines[0] }}"
        cacheable: true

    - name: Wait until ssh is available
      wait_for:
        host: "{{ subnode_0_ip }}"
        state: started
        port: 22
        timeout: 600

    - name: Add subnode-0 to inventory
      add_host:
        name: subnode-0
        groups: subnodes
        ansible_host: "{{ subnode_0_ip }}"
        ansible_fqdn: "{{ subnode_0_ip }}"
        ansible_user: zuul
        ansible_private_key_file: "~/.ssh/id_rsa"
        subnode_private_ip: "{{ subnode_0_ip }}"
        subnode_public_ip: "{{ subnode_0_ip }}"

    - name: Set hostname correctly for subnode-0
      delegate_to: subnode-0
      shell: >
        echo "127.0.0.1 subnode-0 localhost" > /etc/hosts;
        echo "HOSTNAME=subnode-0" >> /etc/sysconfig/network;
        echo "subnode-0" > /etc/hostname;
        hostnamectl set-hostname subnode-0
      become: true
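    # The same discovery/registration flow repeats for subnode-1 below.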
    - name: Get subnode-1 IP address
      script: "get-undercloud-ip.sh subnode-1"
      register: "subnode_1_ip_result"
      until: "subnode_1_ip_result|success"
      retries: 20
      delay: 10

    - name: Set fact for subnode-1 IP
      set_fact:
        subnode_1_ip: "{{ subnode_1_ip_result.stdout_lines[0] }}"
        cacheable: true

    - name: Wait until ssh is available
      wait_for:
        host: "{{ subnode_1_ip }}"
        state: started
        port: 22
        timeout: 600

    - name: Add subnode-1 to inventory
      add_host:
        name: subnode-1
        groups: subnodes
        ansible_host: "{{ subnode_1_ip }}"
        ansible_fqdn: "{{ subnode_1_ip }}"
        ansible_user: zuul
        ansible_private_key_file: "~/.ssh/id_rsa"
        subnode_private_ip: "{{ subnode_1_ip }}"
        subnode_public_ip: "{{ subnode_1_ip }}"

    - name: Set hostname correctly for subnode-1
      delegate_to: subnode-1
      shell: >
        echo "127.0.0.1 subnode-1 localhost" > /etc/hosts;
        echo "HOSTNAME=subnode-1" >> /etc/sysconfig/network;
        echo "subnode-1" > /etc/hostname;
        hostnamectl set-hostname subnode-1
      become: true

    - debug:
        var: root_password
        verbosity: 2

View File

@@ -26,7 +26,9 @@
    virsh vol-create-as '{{ libvirt_volume_pool }}'
    '{{ item.item.name }}'.qcow2 '{{ flavors[item.item.flavor].disk }}'G
    --format qcow2
  when: item|failed
  when:
    - item|failed
    - not fake_nodepool_vms|default("false")|bool
  with_items: "{{ overcloud_vol_check.results }}"
# Define (but do not start) the overcloud nodes. These will be
@@ -38,6 +40,19 @@
    xml: "{{ lookup('template', 'baremetalvm.xml.j2') }}"
    uri: "{{ libvirt_uri }}"
  with_items: "{{ overcloud_nodes }}"
  when: not fake_nodepool_vms|default("false")|bool

- name: Define overcloud vms
  virt:
    name: "{{ item.name }}"
    command: define
    xml: "{{ lookup('template', 'fakenodepoolvm.xml.j2') }}"
    uri: "{{ libvirt_uri }}"
  with_items: "{{ overcloud_nodes }}"
  when: fake_nodepool_vms|default("false")|bool

- include: fake_nodepool.yml
  when: fake_nodepool_vms|default("false")|bool
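# fake_nodepool.yml customizes the undercloud image, boots the subnodes, and
# registers them in the inventory via add_host.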
# Create additional blockdevices for each objectstorage flavor node
# These are sparse files, not using space if unused
@@ -74,3 +89,4 @@
  template:
    src: "{{ undercloud_instackenv_template }}"
    dest: "{{ working_dir }}/instackenv.json"
  when: create_instackenv_json|default("true")|bool
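# The fake nodepool playbook sets create_instackenv_json: false, since these
# VMs are not enrolled in Ironic.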

View File

@@ -0,0 +1,47 @@
<domain type='{{ libvirt_domain_type }}'>
  <name>{{ item.name }}</name>
  <memory unit='MiB'>{{ flavors[item.flavor].memory }}</memory>
  <vcpu>{{ flavors[item.flavor].vcpu }}</vcpu>
  {{ baremetal_vm_xml|default('') }}

  <os>
    <type arch='{{ libvirt_arch }}'>hvm</type>
    <boot dev='hd'/>
    <bootmenu enable='no'/>
  </os>

  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>

  <cpu mode='host-passthrough'/>
  <clock offset='utc'/>

  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>

  <devices>
    <disk type='volume' device='disk'>
      <driver name='qemu' type='qcow2' cache='unsafe'/>
      <source pool='{{ libvirt_volume_pool }}' volume='{{ item.name }}.qcow2'/>
      <target dev='{{ libvirt_diskdev }}' bus='{{ libvirt_diskbus }}'/>
    </disk>
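    <!-- The guest attaches to libvirt's default bridge (virbr0), so it gets
         a DHCP lease from the default network; get-undercloud-ip.sh relies
         on the resulting ARP entry to discover the address. -->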
    <interface type='bridge'>
      <source bridge='virbr0'/>
      <model type='virtio'/>
    </interface>

    <serial type='pty'/>
    <console type='pty'/>

    {% if enable_vnc_console|bool %}
    <input type='mouse' bus='ps2'/>
    <graphics type='vnc' port='-1' autoport='yes'/>
    <video>
      <model type='cirrus' vram='9216' heads='1'/>
    </video>
    {% endif %}

    {{ baremetal_vm_device_xml|default('') }}
  </devices>
</domain>

View File

@@ -18,6 +18,10 @@
%} undercloud_ip={{ hostvars[host]['undercloud_ip'] }}{% endif %}
{% if 'ansible_ssh_extra_args' in hostvars[host]
%} ansible_ssh_extra_args={{ hostvars[host]['ansible_ssh_extra_args']|quote }}{% endif %}
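{# subnode_public_ip / subnode_private_ip are set by add_host in fake_nodepool.yml #}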
{% if 'subnode_public_ip' in hostvars[host]
%} subnode_public_ip={{ hostvars[host]['subnode_public_ip'] }}{% endif %}
{% if 'subnode_private_ip' in hostvars[host]
%} subnode_private_ip={{ hostvars[host]['subnode_private_ip'] }}{% endif %}
{% endif %}
{% endfor %}