VirtualBMC support for tripleo-quickstart

This patch replaces the usage of the pxe_ssh driver in Ironic in
favor of pxe_ipmitool + VirtualBMC*. The main reason for this patch is
that the pxe_ssh driver is already marked as unsupported and will be
removed from Ironic upstream at the end of the Ocata release.

Older releases (Liberty, Mitaka, and Newton) will continue to rely on
the pxe_ssh driver.

* VirtualBMC is a proxy that converts IPMI commands to libvirt calls

Partial-Bug: #1645733
Change-Id: Iaaa9fc1fd593cbce045456f4341c461a8eea82ad
This commit is contained in:
Lucas Alvares Gomes 2016-11-18 11:11:43 +00:00
parent 2d42633a43
commit c8c6e57ca2
12 changed files with 118 additions and 9 deletions

View File

@ -4,12 +4,15 @@
overcloud_nodes:
- name: control_0
flavor: control
virtualbmc_port: 6230
- name: compute_0
flavor: compute
virtualbmc_port: 6231
- name: ceph_0
flavor: ceph
virtualbmc_port: 6232
# Tell tripleo how we want things done.
extra_args: >-

View File

@ -24,13 +24,19 @@ undercloud_generate_service_certificate: True
overcloud_nodes:
- name: control_0
flavor: control
virtualbmc_port: 6230
- name: control_1
flavor: control
virtualbmc_port: 6231
- name: control_2
flavor: control
virtualbmc_port: 6232
- name: compute_0
flavor: compute
virtualbmc_port: 6233
# We don't need introspection in a virtual environment (because we are
# creating all the "hardware" we really know the necessary

View File

@ -21,17 +21,27 @@ default_vcpu: 1
overcloud_nodes:
- name: control_0
flavor: control
virtualbmc_port: 6230
- name: control_1
flavor: control
virtualbmc_port: 6231
- name: control_2
flavor: control
virtualbmc_port: 6232
- name: compute_0
flavor: compute
virtualbmc_port: 6233
- name: compute_1
flavor: compute
virtualbmc_port: 6234
- name: compute_2
flavor: compute
virtualbmc_port: 6235
# We don't need introspection in a virtual environment (because we are
# creating all the "hardware" we really know the necessary

View File

@ -7,9 +7,11 @@ step_introspect: true
overcloud_nodes:
- name: control_0
flavor: control
virtualbmc_port: 6230
- name: compute_0
flavor: compute
virtualbmc_port: 6231
# Tell tripleo how we want things done.
extra_args: >-

View File

@ -6,9 +6,11 @@ step_introspect: true
overcloud_nodes:
- name: control_0
flavor: control
virtualbmc_port: 6230
- name: compute_0
flavor: compute
virtualbmc_port: 6231
network_isolation: false

View File

@ -5,9 +5,11 @@
overcloud_nodes:
- name: control_0
flavor: control
virtualbmc_port: 6230
- name: compute_0
flavor: compute
virtualbmc_port: 6231
# Tell tripleo how we want things done.
extra_args: >-

View File

@ -152,3 +152,7 @@ devmode: false
# Tuned profile set while provisioning remote hosts to optimize for deployment
tuned_profile: 'virtual-host'
# This is the name of the user the `provision` role will create on the
# remote host.
non_root_user: stack

View File

@ -6,8 +6,18 @@
{% for node in overcloud_nodes %}
{
"name": "{{ node.name|replace('_', '-') }}",
"pm_password": {{ virt_power_key_pvt | to_nice_json }},
"pm_type": "pxe_ssh",
{% if release in ['liberty', 'mitaka', 'newton' ] %}
"pm_password": {{ virt_power_key_pvt | to_nice_json }},
"pm_type": "pxe_ssh",
"pm_user": "{{ ansible_user_id }}",
"pm_addr": "{{ host_ip }}",
{% else %}
"pm_password": "password",
"pm_type": "pxe_ipmitool",
"pm_user": "admin",
"pm_addr": "127.0.0.1",
"pm_port": "{{ node.virtualbmc_port }}",
{% endif %}
"mac": [
"{{ node_mac_map.get(node.name).get('overcloud') }}"
],
@ -15,8 +25,6 @@
"memory": "{{ flavors[node.flavor].memory }}",
"disk": "{{ flavors[node.flavor].disk }}",
"arch": "{{ libvirt_arch }}",
"pm_user": "{{ ansible_user_id }}",
"pm_addr": "{{ host_ip }}",
"capabilities": "profile:{{ node.flavor }},boot_option:local"
}
{% if not loop.last %}

View File

@ -86,6 +86,14 @@
src: "{{ undercloud_key }}.pub"
dest: "{{ working_dir }}/id_rsa_undercloud.pub"
# Copy the virt host private key to `$HOME/.ssh/id_rsa_virt_power` for
# VirtualBMC be able to access the hypervisor where the VMs are located
- name: Copy virt host ssh private key to working dir
when: release not in ['liberty', 'mitaka', 'newton']
copy:
src: "{{ virt_power_key }}"
dest: "{{ working_dir }}/id_rsa_virt_power"
# Copy the public key to `$HOME/.ssh/authorized_keys` for the `root`
# and `stack` user on the undercloud.
- name: Inject undercloud ssh public key to appliance
@ -106,6 +114,19 @@
owner: stack
group: stack
# This copies the `id_rsa_virt_power` private key that we generated
# in the overcloud setup role to the undercloud host to be used by
# VirtualBMC+libvirt to access the virthost.
- name: Copy id_rsa_virt_power to appliance
when: release not in ['liberty', 'mitaka', 'newton']
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
--upload '{{ working_dir }}/id_rsa_virt_power:/root/.ssh/id_rsa_virt_power'
--run-command 'chown root:root /root/.ssh/id_rsa_virt_power'
--run-command 'chmod 0600 /root/.ssh/id_rsa_virt_power'
environment:
LIBGUESTFS_BACKEND: direct
- name: Create undercloud customize script
template:
src: "{{ undercloud_customize_script }}"
@ -308,4 +329,3 @@
template:
src: ssh.config.j2
dest: "{{ local_working_dir }}/ssh.config.ansible"

View File

@ -1,7 +1,3 @@
# This is the name of the user the `provision` role will create on the
# remote host.
non_root_user: stack
# The path to an ssh key (that we will generate) that can be used to
# log in to the virt host.
virt_host_key: "{{ local_working_dir }}/id_rsa_virt_host"

View File

@ -40,6 +40,15 @@
shell: /bin/bash
become: true
- name: Get the non-root user UID
command: "id {{ non_root_user }} -u"
register: non_root_user_uid_output
changed_when: false
- name: Save the non-root user UID
set_fact:
non_root_user_uid: "{{ non_root_user_uid_output.stdout }}"
# Install the public component of `virt_host_key` in the
# `.ssh/authorized_keys` file for the non-root user.
- name: Configure non-root user authorized_keys

View File

@ -24,3 +24,50 @@
dest: "{{ local_working_dir }}/stackrc"
line: "export OS_PASSWORD={{ undercloud_admin_password.stdout }}"
regexp: "OS_PASSWORD"
- name: Install VirtualBMC package
when: release not in ['liberty', 'mitaka', 'newton']
package:
name: "python2-virtualbmc"
state: present
use: yum
become: true
- name: Create the Virtual BMCs
when: release not in ['liberty', 'mitaka', 'newton']
command: >
vbmc add {{item.name}} --port {{item.virtualbmc_port}} --libvirt-uri "qemu+ssh://{{ non_root_user }}@{{ networks[0].address }}/session?socket=/run/user/{{ hostvars[groups['virthost'][0]].non_root_user_uid }}/libvirt/libvirt-sock&keyfile=/root/.ssh/id_rsa_virt_power&no_verify=1&no_tty=1"
with_items: "{{ overcloud_nodes }}"
become: true
become_user: root
changed_when: false
# TODO(lucasagomes): The service file should be included in the
# virtualbmc RPM package.
- name: Create the VirtualBMC systemd service
when: release not in ['liberty', 'mitaka', 'newton']
copy:
mode: 0664
dest: "/usr/lib/systemd/system/virtualbmc.service"
content: |
[Unit]
Description=VirtualBMC service
After=network.target
[Service]
Type=oneshot
ExecStart=/bin/bash -c 'for bmc in $(ls /root/.vbmc/); do vbmc start $bmc; done'
ExecStop=/bin/bash -c 'for bmc in $(ls /root/.vbmc/); do vbmc stop $bmc; done'
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target
become: true
- name: Start the Virtual BMCs
when: release not in ['liberty', 'mitaka', 'newton']
service:
name: virtualbmc
state: started
enabled: true
become: true