Add supplemental node provisioner

Add the ability to deploy a supplemental node alongside the undercloud
and overcloud nodes via the libvirt/setup role(s), along with basic
support for deploying TLS everywhere.

- Update tripleo-inventory role to allow for configuring supplemental
  node host access
- Update common role defaults with required vars
- Update quickstart.sh to support fake HA IPA deployment
- Update general_config/ipa.yml with relevant vars
- Add nodes/1ctlr_1comp_1supp.yml for supplemental node deployment
- Add release note

Depends-On: I53323511aabf0d616805efab6edde4acc7bedf11
Related-Bug: 1662923

Change-Id: I3f18e26d0e40942c7c54c07b644ee1209fb89c83
Harry Rybacki 2017-03-29 14:57:27 -04:00
parent b553b7d0d9
commit b1489d7d8f
16 changed files with 387 additions and 21 deletions

View File

@ -1,12 +1,39 @@
# Deploy an HA OpenStack environment with an IPA server.
# TLS everywhere related vars. #
# This enables TLS for the undercloud, which will also make haproxy bind to
# the configured public-vip and admin-vip.
undercloud_generate_service_certificate: True
enable_tls_everywhere: true
novajoin_connect_timeout: 60
novajoin_read_timeout: 60
# Set the FreeIPA server IP
freeipa_internal_ip: "{{ external_network_cidr|nthhost(10) }}"
# Set node hostnames.
tripleo_domain: tripleodomain.example.com
freeipa_server_hostname: "ipa.{{ tripleo_domain }}"
undercloud_undercloud_hostname: "undercloud.{{ tripleo_domain }}"
overcloud_cloud_name: "overcloud.{{ tripleo_domain }}"
overcloud_cloud_domain: "{{ tripleo_domain }}"
overcloud_cloud_name_internal: "overcloud.internalapi.{{ tripleo_domain }}"
overcloud_cloud_name_storage: "overcloud.storage.{{ tripleo_domain }}"
overcloud_cloud_name_storage_management: "overcloud.storagemgmt.{{ tripleo_domain }}"
overcloud_cloud_name_ctlplane: "overcloud.ctlplane.{{ tripleo_domain }}"
# Define FreeIPA server as DNS server for under/overcloud.
undercloud_undercloud_nameservers: ["{{ freeipa_internal_ip }}"]
overcloud_dns_servers: ["{{ freeipa_internal_ip }}"]
# Supplemental node related vars. #
# Ensure that the FreeIPA server node is provisioned during deployment.
deploy_supplemental_node: true
supplemental_user: stack
supplemental_node_ip: "{{ freeipa_internal_ip }}"
supplemental_base_image_url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
# We don't need introspection in a virtual environment (because we are
# creating all the "hardware", we already know the necessary information).
step_introspect: false
# Tell tripleo about our environment.
@ -16,14 +43,15 @@ extra_args: >-
--ntp-server pool.ntp.org
test_ping: true
enable_pacemaker: true
run_tempest: false
# Options below direct automatic doc generation by tripleo-collect-logs.
artcl_gen_docs: true
artcl_create_docs_payload:
included_deployment_scripts:
- undercloud-install
- novajoin_prep
- install_novajoin
- overcloud-custom-tht-script
- overcloud-prep-flavors
- overcloud-prep-images
@ -35,6 +63,8 @@ artcl_create_docs_payload:
- env-setup-virt
table_of_contents:
- env-setup-virt
- novajoin_prep
- install_novajoin
- undercloud-install
- overcloud-custom-tht-script
- overcloud-prep-flavors
@ -43,14 +73,3 @@ artcl_create_docs_payload:
- overcloud-deploy
- overcloud-deploy-post
- overcloud-validate
#novajoin settings
undercloud_enable_novajoin: true
undercloud_ipa_otp: 'server_otp'
# set the undercloud hostname
undercloud_undercloud_hostname: 'undercloud.example.com'
# set dns on the undercloud to the ipa server IP
undercloud_undercloud_nameservers: ['192.168.0.1']

View File

@ -0,0 +1,17 @@
# This config file is used to deploy a dedicated node on the virthost for TLS everywhere.
# Define one controller node and one compute node.
overcloud_nodes:
- name: control_0
flavor: control
virtualbmc_port: 6230
- name: compute_0
flavor: compute
virtualbmc_port: 6233
# Define the supplemental node to be used for the FreeIPA server.
supplemental_node:
name: ipa
flavor: undercloud

View File

@ -40,7 +40,7 @@
# The `libvirt/setup` role creates the undercloud and overcloud
# virtual machines.
- name: Setup undercloud and overcloud vms
- name: Setup undercloud, overcloud, and supplemental vms
hosts: virthost
gather_facts: yes
roles:

View File

@ -398,6 +398,10 @@ elif [[ "$OPT_CONFIG" =~ .*ha_big.yml ]]; then
OLD_CONFIG=$OPT_CONFIG
OPT_CONFIG=$OPT_WORKDIR/config/general_config/pacemaker.yml;
OPT_NODES=$OPT_WORKDIR/config/nodes/3ctlr_3comp.yml;
elif [[ "$OPT_CONFIG" =~ .*fake_ha_ipa.yml ]]; then
OLD_CONFIG=$OPT_CONFIG
OPT_CONFIG=$OPT_WORKDIR/config/general_config/ipa.yml;
OPT_NODES=$OPT_WORKDIR/config/nodes/1ctlr_1comp_1supp.yml;
elif [[ "$OPT_CONFIG" =~ .*ha_ipa.yml ]]; then
OLD_CONFIG=$OPT_CONFIG
OPT_CONFIG=$OPT_WORKDIR/config/general_config/ipa.yml;
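With this branch, any config path ending in `fake_ha_ipa.yml` is remapped to the IPA general config plus the single-controller/single-compute/supplemental node layout. A minimal invocation sketch, assuming quickstart.sh's existing `--config` and `--working-dir` options and a placeholder virthost name:

```bash
# Sketch only: the virthost name is a placeholder, and the flags are assumed
# from quickstart.sh's existing option parsing (OPT_CONFIG / OPT_WORKDIR).
export VIRTHOST=virthost.example.com

# The config path only has to match *fake_ha_ipa.yml; the branch above then
# swaps in general_config/ipa.yml and nodes/1ctlr_1comp_1supp.yml.
bash quickstart.sh \
    --config "$PWD/config/general_config/fake_ha_ipa.yml" \
    --working-dir "$HOME/.quickstart" \
    "$VIRTHOST"
```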

View File

@ -0,0 +1,6 @@
---
features:
- |
Add the ability to deploy a supplemental node alongside the undercloud,
and to deploy FreeIPA on that node to enable TLS everywhere.

View File

@ -97,6 +97,12 @@ undercloud_node:
name: undercloud
flavor: undercloud
# Do not deploy supplemental nodes by default.
deploy_supplemental_node: false
# Do not deploy FreeIPA server by default.
enable_tls_everywhere: false
# The overcloud will have three controllers, one compute node,
# and a ceph storage node.
overcloud_nodes:

View File

@ -9,9 +9,10 @@
# - `libvirt/setup/user`
# - `libvirt/setup/overcloud`
# - `libvirt/setup/undercloud`
# - `libvirt/setup/supplemental`
dependencies:
- role: libvirt
- role: setup/user
- role: setup/overcloud
- role: setup/undercloud
- { role: libvirt }
- { role: setup/user }
- { role: setup/overcloud }
- { role: setup/undercloud }
- { role: setup/supplemental, when: deploy_supplemental_node|bool }

View File

@ -0,0 +1,58 @@
libvirt/setup/supplemental
==========================
An Ansible role for provisioning a supplemental node prior to deployment
of the TripleO undercloud.
Requirements
------------
This role pairs with the libvirt/setup role to provision a supplemental node
VM.
The role expects to be provided with a `supplemental_provisioning_script`,
which is copied to the virthost during execution and is responsible for
preparing the VM's image and adding it to the proper libvirt pool.
The script must also configure `supplemental_node_ip`, which is used both to
add the host to Ansible's in-memory inventory and to prepare the ssh config
files generated by the tripleo-inventory role.
**Note:** If `enable_tls_everywhere` is true, this role provisions the
supplemental node for deployment of a FreeIPA server using the
`tls_everywhere_provisioner.sh` script in lieu of the `supplemental_provisioning_script`.
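For the non-FreeIPA case, a `supplemental_provisioning_script` might look roughly like the sketch below. It is illustrative only, not the shipped `tls_everywhere_provisioner.sh`: the paths, sizes, bridge name, and user are assumptions, the libvirt pool step is skipped for brevity, and the domain name must match `supplemental_node.name` from the nodes config.

```bash
#!/bin/bash
# Illustrative provisioning script sketch; paths and names are assumptions.
set -eux

BASE_IMAGE=/var/cache/tripleo-quickstart/images/supplemental_base.qcow2
NODE_IMAGE=$HOME/supplemental.qcow2
SSH_PUBKEY=$HOME/id_rsa_supplemental.pub

# Prepare the VM image: copy the cached base image and grow it.
cp -v "$BASE_IMAGE" "$NODE_IMAGE"
qemu-img resize "$NODE_IMAGE" +20G

# Create the login user and inject the supplemental ssh key so the
# tripleo-inventory role can reach the node. A real script should also pin
# the node to supplemental_node_ip, e.g. by copying in an ifcfg-eth0 file
# the way tls_everywhere_provisioner.sh does, and grant the user sudo.
virt-customize -a "$NODE_IMAGE" \
    --run-command "useradd -m stack" \
    --ssh-inject stack:file:"$SSH_PUBKEY" \
    --selinux-relabel

# Define and start the domain; the name must match supplemental_node.name so
# that libvirt/setup/supplemental can start and autostart it afterwards.
virt-install \
    --import \
    --name ipa \
    --ram 4096 \
    --vcpus 2 \
    --disk path="$NODE_IMAGE" \
    --network bridge=brext \
    --os-variant generic \
    --graphics none \
    --noautoconsole
```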
Role Variables
--------------
supplemental_node_key: "{{ local_working_dir }}/id_rsa_supplemental"
supplemental_base_image_url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
* `deploy_supplemental_node` -- <false> If true, provision the supplemental node.
* `supplemental_node_key` -- Location of the key used to access the supplemental node VM.
* `supplemental_base_image_url` -- URL of the base image the node is provisioned from.
* `supplemental_node_ip` -- IP address at which the provisioned node will be externally accessible.
* `supplemental_provisioning_script` -- Path to a script that is copied to and run from the virthost to provision the VM image.
* `supplemental_user` -- <stack> The user used to deploy the supplemental node.
Example Playbook
----------------
```yaml
---
- name: Setup supplemental vms
hosts: virthost
roles:
- libvirt/setup/supplemental
```
License
-------
Apache 2.0
Author Information
------------------
RDO-CI Team

View File

@ -0,0 +1,5 @@
---
supplemental_node_key: "{{ local_working_dir }}/id_rsa_supplemental"
supplemental_base_image_url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
supplemental_user: stack

View File

@ -0,0 +1,2 @@
dependencies:
- common

View File

@ -0,0 +1,3 @@
---
- include: provision.yml

View File

@ -0,0 +1,111 @@
---
# We're going to try putting files in `local_working_dir`, so make
# sure it exists first.
- name: Ensure local working dir exists
delegate_to: localhost
file:
path: '{{ local_working_dir }}'
state: directory
- name: Generate ssh keys
delegate_to: localhost
command: >
ssh-keygen -f '{{ supplemental_node_key }}' -N ''
-C ansible_generated_supplemental
-t rsa -b 4096
args:
creates: '{{ supplemental_node_key }}'
- name: Ensure working dir exists on virthost
file:
path: '{{ working_dir }}'
state: directory
- name: Copy ssh pub key to the virthost
copy:
src: '{{ supplemental_node_key }}.pub'
dest: '{{ working_dir }}'
owner: '{{ undercloud_user }}'
group: '{{ undercloud_user }}'
mode: 0600
become: true
# Check if the supplemental node volume already exists. If not, fetch the
# base image and provision the node.
- name: Check if the supplemental node volume already exists
command: >
virsh vol-info --pool '{{ libvirt_volume_pool }}'
'{{ supplemental_node.name }}.qcow2'
ignore_errors: true
changed_when: false
register: supplemental_vol_check
environment:
LIBVIRT_DEFAULT_URI: '{{ libvirt_uri }}'
- when: supplemental_vol_check|failed
block:
# TODO(hrybacki): Update fetch-images role to handle supplemental images
- name: Fetch centos image for ipa
get_url:
url: '{{ supplemental_base_image_url }}'
dest: '{{ image_cache_dir }}/supplemental_base.qcow2'
- name: Ensure virt-install is installed on virthost
package:
name: 'virt-install'
state: 'present'
become: true
- name: Prepare TLS everywhere provisioner script
template:
src: tls_everywhere_provisioner.sh.j2
dest: '~/tls_everywhere_provisioner.sh'
mode: 0700
when: enable_tls_everywhere|bool
- name: Execute TLS everywhere provisioner script
shell: 'bash ~/tls_everywhere_provisioner.sh &> ~/tls_everywhere_provisioner.log'
when: enable_tls_everywhere|bool
- when: supplemental_provisioning_script is defined and not enable_tls_everywhere|bool
block:
- name: Copy provisioning script to virthost
copy:
src: '{{ supplemental_provisioning_script }}'
dest: '~/supplemental_node_provisioner.sh'
mode: 0744
- name: Execute supplemental node provisioning script
shell: >
bash ~/supplemental_node_provisioner.sh
# Start the supplemental node virtual machine.
- name: Start supplemental node vm
virt:
name: '{{ supplemental_node.name }}'
command: start
state: running
uri: '{{ libvirt_uri }}'
# Configure the supplemental vm to be automatically started at boot.
- name: Configure supplemental node vm to start at virthost boot
virt:
name: '{{ supplemental_node.name }}'
command: autostart
uri: '{{ libvirt_uri }}'
- name: Wait for VM to come online
pause:
seconds: 30
# Add the supplemental node to the in-memory inventory.
- name: Add supplemental node vm to inventory
add_host:
name: supplemental
groups: supplemental
ansible_host: supplemental
ansible_fqdn: supplemental
ansible_user: '{{ supplemental_user }}'
ansible_private_key_file: '{{ supplemental_node_key }}'
ansible_ssh_extra_args: '-F "{{ local_working_dir }}/ssh.config.ansible"'
supplemental_node_ip: '{{ supplemental_node_ip }}'

View File

@ -0,0 +1,79 @@
#!/bin/bash
set -eux
CLONEFROM={{ image_cache_dir }}/supplemental_base.qcow2
VMSSHKEY={{ working_dir }}/id_rsa_supplemental.pub
VMDISKADD=30G
WORKDIR=/tmp/virt-undercloud-$(date +%s)
VMIMGIPA={{ working_dir }}/ipa.qcow2
VMIMGIPACOPY={{ working_dir }}/ORIG-ipa.qcow2
# Copy qcow2 base image
cp -v $CLONEFROM $VMIMGIPA
# Resize the FreeIPA image
echo "$(date) - Adding $VMDISKADD to $VMIMGIPA: "
qemu-img resize $VMIMGIPA +$VMDISKADD
echo "$(date) - Resizing filesystem of $VMIMGIPA: "
cp -v $VMIMGIPA $VMIMGIPACOPY
virt-resize --expand /dev/sda1 $VMIMGIPACOPY $VMIMGIPA
rm -fv $VMIMGIPACOPY
echo "$(date) - Checking status of $VMIMGIPA: "
qemu-img info $VMIMGIPA
virt-filesystems --long -h --all -a $VMIMGIPA
cat > ifcfg-eth0 <<EOF
NAME=eth0
DEVICE=eth0
ONBOOT=yes
BOOTPROTO=static
IPADDR={{ freeipa_internal_ip }}
NETMASK=255.255.255.0
GATEWAY=192.168.23.1
PEERDNS=yes
DNS1=10.11.5.19
TYPE=Ethernet
EOF
cat > ifcfg-eth1 <<EOF
NAME=eth1
DEVICE=eth1
ONBOOT=yes
BOOTPROTO=none
TYPE=Ethernet
EOF
# NOTE(hrybacki): The ugly construct below exists because Ansible <2.3 cannot control
# whitespace trimming in Jinja templates, which makes it impossible to emit a proper
# newline. http://docs.ansible.com/ansible/template_module.html#options
virt-customize -a ~{{ ssh_user }}/ipa.qcow2 \
--root-password password:redhat \
--install openssh-server \
--run-command "xfs_growfs /" \
--run-command "echo 'GRUB_CMDLINE_LINUX=\"console=tty0 crashkernel=auto no_timer_check net.ifnames=0 console=ttyS0,115200n8\"' >> /etc/default/grub" \
--run-command "grubby --update-kernel=ALL --args=net.ifnames=0" \
--run-command "systemctl enable sshd" \
{{ "--run-command \"useradd " + supplemental_user + " -m -p ''\" \\" if supplemental_user != 'root' else "\\" }}
{{ "--run-command \"echo " + supplemental_user + " ALL=\\(ALL\\) NOPASSWD:ALL >> /etc/sudoers\" \\" if supplemental_user != 'root' else "\\" }}
--mkdir /root/.ssh \
--copy-in ifcfg-eth0:/etc/sysconfig/network-scripts/ \
--copy-in ifcfg-eth1:/etc/sysconfig/network-scripts/ \
--ssh-inject {{ supplemental_user }}:file:$VMSSHKEY \
--selinux-relabel
# Deploy the ipa vm
virt-install \
--import \
--name ipa \
--ram 8086 \
--disk path=$VMIMGIPA \
--vcpus 4 \
--os-type linux \
--os-variant generic \
--network bridge=brext \
--graphics none \
--noautoconsole

View File

@ -55,6 +55,28 @@
with_items: "{{ overcloud_nodes }}"
ignore_errors: true
# Do the same thing to the supplemental node.
- environment:
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
block:
- name: Check supplemental vm
command: >
virsh domid "{{ supplemental_node.name }}"
ignore_errors: true
register: supplemental_check
- name: Destroy supplemental vm
command: >
virsh destroy "{{ supplemental_node.name }}"
when: supplemental_check|success
ignore_errors: true
- name: Undefine supplemental vm
command: >
virsh undefine "{{ supplemental_node.name }}" --remove-all-storage
when: supplemental_check|success
ignore_errors: true
# Do the same thing to the undercloud node.
- name: Check undercloud vm
command: >

View File

@ -66,6 +66,23 @@
set_fact: undercloud_ip={{ hostvars['undercloud'].undercloud_ip }}
when: hostvars['undercloud'] is defined and hostvars['undercloud'].undercloud_ip is defined
# Add the supplemental node to the in-memory inventory.
- name: Add supplemental node vm to inventory
add_host:
name: supplemental
groups: supplemental
ansible_host: supplemental
ansible_fqdn: supplemental
ansible_user: '{{ supplemental_user }}'
ansible_private_key_file: '{{ local_working_dir }}/id_rsa_supplemental'
ansible_ssh_extra_args: '-F "{{local_working_dir}}/ssh.config.ansible"'
supplemental_node_ip: "{{ supplemental_node_ip }}"
when: supplemental_node_ip is defined
- name: set_fact for supplemental ip
set_fact: supplemental_node_ip={{ hostvars['supplemental'].supplemental_node_ip }}
when: hostvars['supplemental'] is defined and hostvars['supplemental'].supplemental_node_ip is defined
# Re-add the undercloud to reset the ansible_ssh parameters set in quickstart.
- name: Add undercloud vm to inventory
add_host:
@ -95,6 +112,13 @@
-W {{ undercloud_ip }}:22"
when: hostvars[groups['virthost'][0]].ansible_private_key_file is not defined and undercloud_ip is defined
- name: set supplemental ssh proxy command
set_fact: supplemental_ssh_proxy_command="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
-o ConnectTimeout=60 -i {{ local_working_dir }}/id_rsa_virt_power
{{ ssh_user }}@{{ hostvars[groups['virthost'][0]].ansible_host }}
-W {{ supplemental_node_ip }}:22"
when: supplemental_node_ip is defined
- name: create inventory from template
delegate_to: localhost
template:

View File

@ -43,6 +43,15 @@ Host undercloud
StrictHostKeyChecking no
UserKnownHostsFile=/dev/null
{% if deploy_supplemental_node %}
Host supplemental
ProxyCommand {{ supplemental_ssh_proxy_command }}
IdentityFile {{ local_working_dir }}/id_rsa_supplemental
User {{ supplemental_user }}
StrictHostKeyChecking no
UserKnownHostsFile=/dev/null
{% endif %}
{% if groups["overcloud"] is defined %}
{% for host in groups["overcloud"] %}
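Once the inventory and this ssh config are rendered, the supplemental node is reachable through the virthost jump host in the same way as the undercloud. A quick check, assuming `local_working_dir` is the default `~/.quickstart` and that the generated inventory file is `~/.quickstart/hosts`:

```bash
# Paths assume local_working_dir is the default ~/.quickstart; adjust them if
# a different --working-dir was passed to quickstart.sh.
ssh -F ~/.quickstart/ssh.config.ansible supplemental

# Or exercise the generated inventory entry with an ad-hoc Ansible ping:
ansible supplemental -i ~/.quickstart/hosts -m ping
```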