Add support for infrastructure VMs

This change allows you to define additional VMs to deploy
on the seed-hypervisor.

Co-authored-by: Piotr Parczewski <piotr@stackhpc.com>
Co-authored-by: Will Szumski <will@stackhpc.com>
Co-authored-by: Mark Goddard <mark@stackhpc.com>
Story: 2008741
Task: 42095
Change-Id: I8055fc5eb0a9edadcb35767303c659922f2d07ca
Author: Will Szumski, 2021-03-29 12:24:44 +02:00 (committed by Mark Goddard)
Commit: c583922c27 (parent: 3b90164102)
47 changed files with 1272 additions and 19 deletions

@@ -1,6 +1,6 @@
 ---
 - name: Ensure development tools are installed
-  hosts: seed-hypervisor:seed:overcloud
+  hosts: seed-hypervisor:seed:overcloud:infra-vms
   tags:
     - dev-tools
   roles:

@@ -4,7 +4,7 @@
 # In some cases cloud-init reconfigure automatically network interface
 # and cause some issues in network configuration
 - name: Disable Cloud-init service
-  hosts: overcloud
+  hosts: overcloud:infra-vms
   tags:
     - disable-cloud-init
   roles:

@@ -3,7 +3,7 @@
 # servers but gets in the way after this as it tries to enable all network
 # interfaces. In some cases this can lead to timeouts.
 - name: Ensure Glean is disabled and its artifacts are removed
-  hosts: seed:overcloud
+  hosts: seed:overcloud:infra-vms
   tags:
     - disable-glean
   roles:

@@ -1,6 +1,6 @@
 ---
 - name: Disable SELinux and reboot if required
-  hosts: seed:overcloud
+  hosts: seed:overcloud:infra-vms
   tags:
     - disable-selinux
   roles:

@@ -1,6 +1,6 @@
 ---
 - name: Ensure DNF repos are configured
-  hosts: seed-hypervisor:seed:overcloud
+  hosts: seed-hypervisor:seed:overcloud:infra-vms
   vars:
     ansible_python_interpreter: /usr/bin/python3
   tags:

@@ -0,0 +1,173 @@
---
###############################################################################
# Infrastructure VM configuration.
# Name of the infra VM.
infra_vm_name: "{{ inventory_hostname }}"
# Memory in MB.
infra_vm_memory_mb: "{{ 16 * 1024 }}"
# Number of vCPUs.
infra_vm_vcpus: 4
# List of volumes.
infra_vm_volumes:
  - "{{ infra_vm_root_volume }}"
  - "{{ infra_vm_data_volume }}"
# Root volume.
infra_vm_root_volume:
  name: "{{ infra_vm_name }}-root"
  pool: "{{ infra_vm_pool }}"
  capacity: "{{ infra_vm_root_capacity }}"
  format: "{{ infra_vm_root_format }}"
  image: "{{ infra_vm_root_image }}"
# Data volume.
infra_vm_data_volume:
  name: "{{ infra_vm_name }}-data"
  pool: "{{ infra_vm_pool }}"
  capacity: "{{ infra_vm_data_capacity }}"
  format: "{{ infra_vm_data_format }}"
# Name of the storage pool for the infra VM volumes.
infra_vm_pool: default
# Capacity of the infra VM root volume.
infra_vm_root_capacity: 50G
# Format of the infra VM root volume.
infra_vm_root_format: qcow2
# Base image for the infra VM root volume. Default is
# "https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img"
# when os_distribution is "ubuntu", or
# "https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20210210.0.x86_64.qcow2"
# otherwise.
infra_vm_root_image: >-
  {%- if os_distribution == 'ubuntu' %}
  https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img
  {%- else -%}
  https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20210210.0.x86_64.qcow2
  {%- endif %}
# Capacity of the infra VM data volume.
infra_vm_data_capacity: 100G
# Format of the infra VM data volume.
infra_vm_data_format: qcow2
# List of network interfaces to attach to the infra VM.
infra_vm_interfaces: "{{ network_interfaces | sort | map('net_libvirt_vm_network') | list }}"
# Hypervisor that the VM runs on.
infra_vm_hypervisor: "{{ groups['seed-hypervisor'] | first }}"
# Customise ansible_ssh_extra_args for the test that checks SSH connectivity
# after provisioning. Defaults to disabling ssh host key checking.
infra_vm_wait_connection_ssh_extra_args: '-o StrictHostKeyChecking=no'
# OS family. Needed for config drive generation.
infra_vm_os_family: "{{ 'RedHat' if os_distribution == 'centos' else 'Debian' }}"
###############################################################################
# Infrastructure VM node configuration.
# User with which to access the infrastructure vm via SSH during bootstrap, in
# order to setup the Kayobe user account. Default is {{ os_distribution }}.
infra_vm_bootstrap_user: "{{ os_distribution }}"
###############################################################################
# Infrastructure VM network interface configuration.
# List of networks to which infrastructure vm nodes are attached.
infra_vm_network_interfaces: >
  {{ (infra_vm_default_network_interfaces +
      infra_vm_extra_network_interfaces) | select | unique | list }}
# List of default networks to which infrastructure vm nodes are attached.
infra_vm_default_network_interfaces: >
  {{ [admin_oc_net_name] | select | unique | list }}
# List of extra networks to which infrastructure vm nodes are attached.
infra_vm_extra_network_interfaces: []
###############################################################################
# Infrastructure VM node software RAID configuration.
# List of software RAID arrays. See mrlesmithjr.mdadm role for format.
infra_vm_mdadm_arrays: []
###############################################################################
# Infrastructure VM node encryption configuration.
# List of block devices to encrypt. See stackhpc.luks role for format.
infra_vm_luks_devices: []
###############################################################################
# Infrastructure VM node LVM configuration.
# List of infrastructure vm volume groups. See mrlesmithjr.manage-lvm role for
# format.
infra_vm_lvm_groups: "{{ infra_vm_lvm_groups_default + infra_vm_lvm_groups_extra }}"
# Default list of infrastructure vm volume groups. See mrlesmithjr.manage-lvm
# role for format.
infra_vm_lvm_groups_default: "{{ [infra_vm_lvm_group_data] if infra_vm_lvm_group_data_enabled | bool else [] }}"
# Additional list of infrastructure vm volume groups. See mrlesmithjr.manage-lvm
# role for format.
infra_vm_lvm_groups_extra: []
# Whether a 'data' LVM volume group should exist on the infrastructure vm. By
# default this contains a 'docker-volumes' logical volume for Docker volume
# storage. It will also be used for Docker container and image storage if
# 'docker_storage_driver' is set to 'devicemapper'. Default is true if
# 'docker_storage_driver' is set to 'devicemapper', or false otherwise.
infra_vm_lvm_group_data_enabled: "{{ docker_storage_driver == 'devicemapper' }}"
# Infrastructure VM LVM volume group for data. See mrlesmithjr.manage-lvm role
# for format.
infra_vm_lvm_group_data:
  vgname: data
  disks: "{{ infra_vm_lvm_group_data_disks }}"
  create: True
  lvnames: "{{ infra_vm_lvm_group_data_lvs }}"
# List of disks for use by infrastructure vm LVM data volume group. Default to
# an invalid value to require configuration.
infra_vm_lvm_group_data_disks:
  - changeme
# List of LVM logical volumes for the data volume group.
infra_vm_lvm_group_data_lvs:
  - "{{ infra_vm_lvm_group_data_lv_docker_volumes }}"
# Docker volumes LVM backing volume.
infra_vm_lvm_group_data_lv_docker_volumes:
  lvname: docker-volumes
  size: "{{ infra_vm_lvm_group_data_lv_docker_volumes_size }}"
  create: True
  filesystem: "{{ infra_vm_lvm_group_data_lv_docker_volumes_fs }}"
  mount: True
  mntp: /var/lib/docker/volumes
# Size of docker volumes LVM backing volume.
infra_vm_lvm_group_data_lv_docker_volumes_size: 75%VG
# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
infra_vm_lvm_group_data_lv_docker_volumes_fs: ext4
###############################################################################
# Infrastructure VM node sysctl configuration.
# Dict of sysctl parameters to set.
infra_vm_sysctl_parameters: {}
###############################################################################
# Infrastructure VM node user configuration.
# List of users to create. This should be in a format accepted by the
# singleplatform-eng.users role.
infra_vm_users: "{{ users_default }}"
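
Any of these defaults can be overridden for an individual VM via ``host_vars``.
A minimal sketch, assuming a VM named ``an-example-vm`` whose data disk appears
inside the guest as ``/dev/vdb`` (both names are placeholders)::

    # $KAYOBE_CONFIG_PATH/inventory/host_vars/an-example-vm
    infra_vm_vcpus: 8
    infra_vm_memory_mb: "{{ 8 * 1024 }}"
    infra_vm_data_capacity: 200G
    # Only needed if the 'data' LVM volume group is enabled; the default
    # ('changeme') is deliberately invalid.
    infra_vm_lvm_group_data_disks:
      - /dev/vdb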

@@ -0,0 +1,3 @@
---
# Host/IP with which to access the infra VMs via SSH.
ansible_host: "{{ admin_oc_net_name | net_ip }}"

@@ -0,0 +1,7 @@
---
# User with which to access the infra VMs via SSH.
ansible_user: "{{ kayobe_ansible_user }}"
# User with which to access the infra VMs before the kayobe_ansible_user
# account has been created.
bootstrap_user: "{{ infra_vm_bootstrap_user }}"
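
If a VM is built from an image whose default login account differs from
``os_distribution``, the bootstrap user can be overridden in that VM's
``host_vars``; a sketch with an illustrative value::

    infra_vm_bootstrap_user: debian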

@@ -0,0 +1,6 @@
---
###############################################################################
# Infra VM node encryption configuration.
# List of block devices to encrypt. See stackhpc.luks role for format.
luks_devices: "{{ infra_vm_luks_devices }}"

@@ -0,0 +1,6 @@
---
###############################################################################
# Infra VM node LVM configuration.
# List of LVM volume groups.
lvm_groups: "{{ infra_vm_lvm_groups }}"

@@ -0,0 +1,6 @@
---
###############################################################################
# Infra VM node software RAID configuration.
# List of software RAID arrays. See mrlesmithjr.mdadm role for format.
mdadm_arrays: "{{ infra_vm_mdadm_arrays }}"
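
As a rough illustration of the mrlesmithjr.mdadm format (device names and RAID
level are placeholders to adapt)::

    infra_vm_mdadm_arrays:
      - name: md0
        devices:
          - /dev/vdb
          - /dev/vdc
        level: '1'
        state: present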

@@ -0,0 +1,6 @@
---
###############################################################################
# Network interface attachments.
# List of networks to which these nodes are attached.
network_interfaces: "{{ infra_vm_network_interfaces | unique | list }}"

@@ -0,0 +1,3 @@
---
# Dict of sysctl parameters to set.
sysctl_parameters: "{{ infra_vm_sysctl_parameters }}"
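
For example, a host or group could set a dict along these lines (the parameter
shown is illustrative only)::

    infra_vm_sysctl_parameters:
      net.ipv4.ip_forward: 1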

@@ -0,0 +1,4 @@
---
# List of users to create. This should be in a format accepted by the
# singleplatform-eng.users role.
users: "{{ infra_vm_users }}"
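
A sketch of the expected list format, based on the singleplatform-eng.users
role (the user and group names are placeholders; consult that role's
documentation for the full set of fields)::

    infra_vm_users:
      - username: example-user
        groups:
          - wheel
        append: True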

@@ -1,7 +1,7 @@
 ---
 - name: Run a command
   gather_facts: False
-  hosts: seed-hypervisor:seed:overcloud
+  hosts: seed-hypervisor:seed:overcloud:infra-vms
   tasks:
     - name: Run a command
       shell: "{{ host_command_to_run }}"

@@ -1,6 +1,6 @@
 ---
 - name: Update host packages
-  hosts: seed-hypervisor:seed:overcloud
+  hosts: seed-hypervisor:seed:overcloud:infra-vms
   vars:
     # Optionally set this to a list of packages to update. Default behaviour is
     # to update all packages.

@@ -0,0 +1,38 @@
---
- name: Set facts about infra VMs
  gather_facts: false
  hosts: "{{ infra_vm_limit | default('infra-vms') }}"
  tags:
    - always
  tasks:
    - name: Group virtual machines by hypervisor
      group_by:
        key: infra_vms_{{ infra_vm_hypervisor }}
      # FIXME(mgoddard): Is delegate_to necessary?
      delegate_to: "{{ infra_vm_hypervisor }}"
      changed_when: false

- name: Ensure defined infra VMs are destroyed
  hosts: hypervisors
  tags:
    - infra-vm-deprovision
  tasks:
    - import_role:
        name: infra-vms
      vars:
        infra_vm_action: destroy
        infra_vm_vms: "{{ groups['infra_vms_' ~ inventory_hostname ] | default([]) }}"

- name: Set facts about infra VMs
  gather_facts: false
  hosts: "{{ infra_vm_limit | default('infra-vms') }}"
  tags:
    - infra-vm-deprovision
  tasks:
    - name: Remove host key from known hosts
      known_hosts:
        name: "{{ ansible_host }}"
        state: "absent"
      delegate_to: localhost
      throttle: 1
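
The ``group_by`` task above builds one dynamic group per hypervisor, named
``infra_vms_<hypervisor>``. Purely as an illustration (host names are
placeholders), two VMs mapped to the seed hypervisor would be grouped roughly
as in this static equivalent::

    infra_vms_seed-hypervisor:
      hosts:
        infra-vm-1:
        infra-vm-2: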

@@ -0,0 +1,44 @@
---
- name: Set facts about infra VMs
  gather_facts: false
  hosts: "{{ infra_vm_limit | default('infra-vms') }}"
  tags:
    - always
  tasks:
    - name: Group virtual machines by hypervisor
      group_by:
        key: infra_vms_{{ infra_vm_hypervisor }}
      # FIXME(mgoddard): Is delegate_to necessary?
      delegate_to: "{{ infra_vm_hypervisor }}"
      changed_when: false

- name: Ensure defined infra VMs are deployed
  hosts: hypervisors
  tags:
    - infra-vm-provision
  tasks:
    - import_role:
        name: infra-vms
      vars:
        infra_vm_vms: "{{ groups['infra_vms_' ~ inventory_hostname ] | default([]) }}"

- name: Wait for infra VMs to be accessible
  hosts: "{{ infra_vm_limit | default('infra-vms') }}"
  gather_facts: false
  tags:
    - infra-vm-provision
  tasks:
    - name: Wait for a connection to VM with bootstrap user
      wait_for_connection:
        # NOTE: Ensure we exceed the 5 minute DHCP timeout of the eth0
        # interface if necessary.
        timeout: 600
      vars:
        # NOTE(wszumski): The ansible_host_key_checking variable doesn't seem
        # to work, but it would be nice not to fail if the host key changes.
        # We check the host key during host configure.
        # https://github.com/ansible/ansible/blob/1c34492413dec09711c430745034db0c108227a9/lib/ansible/plugins/connection/ssh.py#L49
        # https://github.com/ansible/ansible/issues/49254
        ansible_ssh_extra_args: '{{ infra_vm_wait_connection_ssh_extra_args }}'
        ansible_user: "{{ bootstrap_user }}"
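
The SSH options used for this connectivity check can be tuned per VM via
``infra_vm_wait_connection_ssh_extra_args``. For example, a ``host_vars`` entry
such as the following (illustrative value) accepts new host keys instead of
disabling host key checking entirely::

    infra_vm_wait_connection_ssh_extra_args: '-o StrictHostKeyChecking=accept-new'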

@@ -1,6 +1,6 @@
 ---
 - name: Ensure IP addresses are allocated
-  hosts: seed-hypervisor:seed:overcloud
+  hosts: seed-hypervisor:seed:overcloud:infra-vms
   tags:
     - ip-allocation
   gather_facts: no

@@ -7,7 +7,7 @@
 # bootstrap process if the account is inaccessible.
 - name: Determine whether user bootstrapping is required
-  hosts: seed-hypervisor:seed:overcloud
+  hosts: seed-hypervisor:seed:overcloud:infra-vms
   gather_facts: false
   tags:
     - kayobe-ansible-user

@@ -3,7 +3,7 @@
 # when running kayobe.
 - name: Ensure a virtualenv exists for kayobe
-  hosts: seed:seed-hypervisor:overcloud
+  hosts: seed:seed-hypervisor:overcloud:infra-vms
   gather_facts: False
   tags:
     - kayobe-target-venv

@@ -1,6 +1,6 @@
 ---
 - name: Ensure encryption configuration is applied
-  hosts: seed-hypervisor:seed:overcloud
+  hosts: seed-hypervisor:seed:overcloud:infra-vms
   tags:
     - luks
   tasks:

@@ -1,6 +1,6 @@
 ---
 - name: Ensure LVM configuration is applied
-  hosts: seed-hypervisor:seed:overcloud
+  hosts: seed-hypervisor:seed:overcloud:infra-vms
   tags:
     - lvm
     - upgrade-check

@@ -1,6 +1,6 @@
 ---
 - name: Ensure software RAID configuration is applied
-  hosts: seed-hypervisor:seed:overcloud
+  hosts: seed-hypervisor:seed:overcloud:infra-vms
   tags:
     - mdadm
   roles:

@@ -1,6 +1,6 @@
 ---
 - name: Ensure networking is configured
-  hosts: seed-hypervisor:seed:overcloud
+  hosts: seed-hypervisor:seed:overcloud:infra-vms
   tags:
     - config
     - network

@@ -1,6 +1,6 @@
 ---
 - name: Configure local PyPi mirror
-  hosts: seed-hypervisor:seed:overcloud
+  hosts: seed-hypervisor:seed:overcloud:infra-vms
   tags:
     - pip
   vars:

@@ -0,0 +1,7 @@
---
# Either 'deploy' or 'destroy'.
infra_vm_action: deploy
# List of inventory hostnames of infra VMs mapped to this hypervisor.
infra_vm_vms: []

@@ -0,0 +1,73 @@
---
- name: "[{{ vm_name }}] Ensure that the VM configdrive exists"
include_role:
name: jriguera.configdrive
vars:
configdrive_os_family: "{{ vm_hostvars.infra_vm_os_family }}"
configdrive_uuid: "{{ vm_name | to_uuid }}"
# Must set configdrive_instance_dir when using a loop
# https://github.com/jriguera/ansible-role-configdrive/blob/8438592c84585c86e62ae07e526d3da53629b377/tasks/main.yml#L17
configdrive_instance_dir: "{{ configdrive_uuid }}"
configdrive_fqdn: "{{ vm_name }}"
configdrive_name: "{{ vm_name }}"
configdrive_ssh_public_key: "{{ lookup('file', ssh_public_key_path) }}"
configdrive_config_dir: "{{ image_cache_path }}"
configdrive_volume_path: "{{ image_cache_path }}"
configdrive_config_dir_delete: False
configdrive_resolv:
domain: "{{ vm_hostvars.resolv_domain | default }}"
search: "{{ vm_hostvars.resolv_search | default }}"
dns: "{{ vm_hostvars.resolv_nameservers | default([]) }}"
configdrive_network_device_list: >
{{ vm_hostvars.network_interfaces |
map('net_configdrive_network_device', vm_hostvars.inventory_hostname) |
list }}
- name: "[{{ vm_name }}] Set a fact containing the configdrive image path"
set_fact:
vm_configdrive_path: "{{ image_cache_path }}/{{ vm_name }}.iso"
- name: "[{{ vm_name }}] Ensure configdrive is decoded and decompressed"
shell: >
base64 -d {{ image_cache_path }}/{{ vm_name | to_uuid }}.gz
| gunzip
> {{ vm_configdrive_path }}
- name: "[{{ vm_name }}] Ensure unnecessary files are removed"
file:
path: "{{ item }}"
state: absent
with_items:
- "{{ image_cache_path }}/{{ vm_name | to_uuid }}.gz"
- name: "[{{ vm_name }}] Check the size of the configdrive image"
stat:
path: "{{ vm_configdrive_path }}"
get_checksum: False
get_md5: False
mime: False
register: stat_result
- name: "[{{ vm_name }}] Ensure that the VM is provisioned"
include_role:
name: stackhpc.libvirt-vm
vars:
vm_configdrive_device: cdrom
vm_configdrive_volume:
name: "{{ vm_name }}-configdrive"
pool: "{{ vm_hostvars.infra_vm_pool }}"
# Round size up to next multiple of 4096.
capacity: "{{ (stat_result.stat.size + 4095) // 4096 * 4096 }}"
device: "{{ vm_configdrive_device }}"
format: "raw"
image: "{{ vm_configdrive_path }}"
remote_src: true
libvirt_vm_image_cache_path: "{{ image_cache_path }}"
libvirt_vms:
- name: "{{ vm_name }}"
memory_mb: "{{ vm_hostvars.infra_vm_memory_mb }}"
vcpus: "{{ vm_hostvars.infra_vm_vcpus }}"
volumes: "{{ vm_hostvars.infra_vm_volumes + [vm_configdrive_volume] }}"
interfaces: "{{ vm_hostvars.infra_vm_interfaces }}"
console_log_enabled: true

@@ -0,0 +1,16 @@
---
- name: Destroy VMs
  import_role:
    name: stackhpc.libvirt-vm
  vars:
    infra_vm_configdrive_volume:
      name: "{{ vm_name }}-configdrive"
      pool: "{{ hostvars[vm_hostvars.infra_vm_hypervisor].infra_vm_pool }}"
    libvirt_vms:
      - name: "{{ vm_name }}"
        memory_mb: "{{ vm_hostvars.infra_vm_memory_mb }}"
        vcpus: "{{ vm_hostvars.infra_vm_vcpus }}"
        volumes: "{{ vm_hostvars.infra_vm_volumes + [infra_vm_configdrive_volume] }}"
        state: "absent"
  become: True

@@ -0,0 +1,18 @@
---
- import_tasks: prerequisites.yml

- name: List all VMs on the hypervisor
  virt:
    command: list_vms
  register: all_vms
  become: true

- name: "{{ infra_vm_action | capitalize }} infra VMs (loop)"
  include_tasks: "{{ infra_vm_action }}.yml"
  vars:
    vm_name: "{{ vm_hostvars.infra_vm_name }}"
    vm_hostvars: "{{ hostvars[vm_item] }}"
  loop: "{{ infra_vm_vms }}"
  when: (infra_vm_action == "deploy" and vm_name not in all_vms.list_vms) or infra_vm_action == "destroy"
  loop_control:
    loop_var: vm_item

@@ -0,0 +1,18 @@
---
# NOTE(priteau): On seed hypervisors running CentOS 8, the configdrive role
# will fail to install coreutils if coreutils-single is already present.
# Until the role handles it, install it using the --allowerasing option
# which will remove coreutils-single.
- name: Ensure coreutils package is installed
  command: "dnf install coreutils -y --allowerasing"
  become: True
  when:
    - ansible_facts.os_family == 'RedHat'

- name: Ensure the image cache directory exists
  file:
    path: "{{ image_cache_path }}"
    state: directory
    owner: "{{ ansible_facts.user_uid }}"
    group: "{{ ansible_facts.user_gid }}"
  become: True

@@ -1,6 +1,6 @@
 ---
 - name: Ensure sysctl parameters are configured
-  hosts: seed:seed-hypervisor:overcloud
+  hosts: seed:seed-hypervisor:overcloud:infra-vms
   tags:
     - sysctl
   roles:

@@ -1,6 +1,6 @@
 ---
 - name: Ensure timezone is configured
-  hosts: seed-hypervisor:seed:overcloud
+  hosts: seed-hypervisor:seed:overcloud:infra-vms
   tags:
     - timezone
   tasks:

@@ -1,6 +1,6 @@
 ---
 - name: Ensure users exist
-  hosts: seed:seed-hypervisor:overcloud
+  hosts: seed:seed-hypervisor:overcloud:infra-vms
   tags:
     - users
   roles:

@@ -10,5 +10,6 @@ administrative tasks.
    general
    seed
+   infra-vms
    overcloud
    bare-metal

@@ -0,0 +1,79 @@
=======================
Infra VM Administration
=======================

Deprovisioning Infrastructure VMs
=================================

.. note::

   This step will destroy the infrastructure VMs and associated data volumes.
   Make sure you back up any data you want to keep.

To deprovision all VMs::

    (kayobe) $ kayobe infra vm deprovision

This can be limited to a subset of the nodes using the ``--limit`` option::

    (kayobe) $ kayobe infra vm deprovision --limit example-vm-1

Updating Packages
=================

It is possible to update packages on the infrastructure VMs.

Package Repositories
--------------------

If using custom DNF package repositories on CentOS, it may be necessary to
update these prior to running a package update. To do this, update the
configuration in ``${KAYOBE_CONFIG_PATH}/dnf.yml`` and run the following
command::

    (kayobe) $ kayobe infra vm host configure --tags dnf

Package Update
--------------

To update one or more packages::

    (kayobe) $ kayobe infra vm host package update --packages <package1>,<package2>

To update all eligible packages, use ``*``, escaping if necessary::

    (kayobe) $ kayobe infra vm host package update --packages "*"

To only install updates that have been marked security related::

    (kayobe) $ kayobe infra vm host package update --packages "*" --security

Note that these commands do not affect packages installed in containers, only
those installed on the host.

Kernel Updates
--------------

If the kernel has been updated, you will probably want to reboot the host
to boot into the new kernel. This can be done using a command such as the
following::

    (kayobe) $ kayobe infra vm host command run --command "shutdown -r" --become

Running Commands
================

It is possible to run a command on the host::

    (kayobe) $ kayobe infra vm host command run --command "<command>"

For example::

    (kayobe) $ kayobe infra vm host command run --command "service docker restart"

Commands can also be run on the seed hypervisor host, if one is in use::

    (kayobe) $ kayobe seed hypervisor host command run --command "<command>"

To execute the command with root privileges, add the ``--become`` argument.
Adding the ``--verbose`` argument allows the output of the command to be seen.

@@ -21,4 +21,5 @@ options.
    ironic-python-agent
    docker-registry
    seed-custom-containers
+   infra-vms
    nova-cells

@@ -0,0 +1,97 @@
.. _configuration-infra-vms:

==================
Infrastructure VMs
==================

Kayobe can deploy infrastructure VMs to the seed-hypervisor. These can be used
to provide supplementary services that do not run well within a containerised
environment or are dependencies of the control plane.

Configuration
=============

To deploy an infrastructure VM, add a new host to the ``infra-vms`` group in
the inventory:

.. code-block:: ini
   :caption: ``$KAYOBE_CONFIG_PATH/inventory/infra-vms``

   [infra-vms]
   an-example-vm

The configuration of the virtual machine should be done using ``host_vars``.
These override the ``group_vars`` defined for the ``infra-vms`` group. Most
variables have sensible defaults defined, but there are a few variables which
must be set.

Mandatory variables
-------------------

All networks must have an interface defined, as described in
:ref:`configuration-network-per-host`. By default the VMs are attached
to the admin overcloud network. If, for example, ``admin_oc_net_name`` was
set to ``example_net``, you would need to define ``example_net_interface``.

It is possible to change the list of networks that a VM is attached to by
modifying ``infra_vm_network_interfaces``. Additional interfaces can be added
by setting ``infra_vm_extra_network_interfaces``; see the sketch following the
listing below.

Kayobe applies the following defaults to the infra VM variables. Any of these
variables can be overridden with a ``host_var``.

.. literalinclude:: ../../../../ansible/group_vars/all/infra-vms
   :language: yaml
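
For example, to attach a VM to an additional network while keeping the default
admin network attachment, a ``host_vars`` entry along these lines could be used
(a sketch; ``example_net`` is a placeholder network name):

.. code-block:: yaml
   :caption: ``$KAYOBE_CONFIG_PATH/inventory/host_vars/an-example-vm``

   # Attach the VM to example_net in addition to the default networks.
   infra_vm_extra_network_interfaces:
     - example_net

   # Every attached network must have an interface defined.
   example_net_interface: eth1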

Customisations
--------------

Examples of common customisations are shown below.

By default the Ansible inventory name is used as the name of the VM. This may
be overridden via ``infra_vm_name``:

.. code-block:: yaml
   :caption: ``$KAYOBE_CONFIG_PATH/inventory/host_vars/an-example-vm``

   # Name of the infra VM.
   infra_vm_name: "the-special-one"

By default the VM has 16GB of RAM. This may be changed via
``infra_vm_memory_mb``:

.. code-block:: yaml
   :caption: ``$KAYOBE_CONFIG_PATH/inventory/host_vars/an-example-vm``

   # Memory in MB. Defaults to 16GB.
   infra_vm_memory_mb: "{{ 8 * 1024 }}"

The default network configuration attaches infra VMs to the admin network. If
this is not appropriate, modify ``infra_vm_network_interfaces``. At a minimum
the network interface name for the network should be defined.

.. code-block:: yaml
   :caption: ``$KAYOBE_CONFIG_PATH/inventory/host_vars/an-example-vm``

   # Network interfaces that the VM is attached to.
   infra_vm_network_interfaces:
     - aio

   # Mandatory: all networks must have an interface defined.
   aio_interface: eth0

   # By default kayobe will connect to a host via ``admin_oc_net``.
   # As we have not attached this VM to this network, we must override
   # ansible_host.
   ansible_host: "{{ 'aio' | net_ip }}"

Configuration for all infra VMs can be set using ``extra_vars`` defined in
``$KAYOBE_CONFIG_PATH/infra-vms.yml``. Note that normal Ansible precedence
rules apply and these variables will override any ``host_vars``. If you need to
override the defaults, but still maintain per-host settings, use ``group_vars``
instead, as in the sketch below.
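
A minimal sketch of a group-level override (the file name and value are
illustrative):

.. code-block:: yaml
   :caption: ``$KAYOBE_CONFIG_PATH/inventory/group_vars/infra-vms/infra-vms``

   # Use a larger data volume for every infra VM; individual hosts may still
   # override this in host_vars.
   infra_vm_data_capacity: 200G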

Deploying the virtual machine
=============================

Once the initial configuration has been done follow the steps in
:ref:`deployment-infrastructure-vms`.

@@ -390,6 +390,8 @@ An interface will be assigned an IP address if the associated network has a
 the ``allocation_pool_start`` and ``allocation_pool_end`` attributes, if one
 has not been statically assigned in ``network-allocation.yml``.
 
+.. _configuration-network-interface:
+
 Configuring Ethernet Interfaces
 -------------------------------

@@ -169,6 +169,8 @@ Then, to run the ``foo.yml`` playbook::
     (kayobe) $ kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/foo.yml
 
+.. _custom-playbooks-hooks:
+
 Hooks
 =====

@@ -75,6 +75,8 @@ Seed Hypervisor
 bare metal host or a VM provisioned outside of Kayobe, this section may be
 skipped.
 
+.. _deployment-seed-hypervisor-host-configure:
+
 Host Configuration
 ------------------
@@ -110,6 +112,8 @@ volumes. To provision the seed VM::
 When this command has completed the seed VM should be active and accessible via
 SSH. Kayobe will update the Ansible inventory with the IP address of the VM.
 
+.. _deployment-seed-host-configure:
+
 Host Configuration
 ------------------
@@ -184,7 +188,7 @@ After this command has completed the seed services will be active.
 :ref:`configuration-bifrost-overcloud-root-image` provides information on
 configuring the root disk image build process. See :ref:`here
 <configuration-seed-custom-containers>` for information about deploying
-additional, custom containers on seed node.
+additional, custom services (containers) on a seed node.
 
 Building Deployment Images
 --------------------------
@@ -236,6 +240,112 @@ Leave the seed VM and return to the shell on the Ansible control host::
     $ exit
 
.. _deployment-infrastructure-vms:

Infrastructure VMs
==================

.. warning::

   Support for infrastructure VMs is considered experimental: its
   design may change in future versions without a deprecation period.

.. note::

   It is necessary to perform some configuration before these steps
   can be followed. Please see :ref:`configuration-infra-vms`.

VM Provisioning
---------------

The hypervisor used to host a VM is controlled via the ``infra_vm_hypervisor``
variable. It defaults to the seed hypervisor. All hypervisors should run
CentOS or Ubuntu with ``libvirt`` installed, should have ``libvirt`` networks
configured for all networks that the VM needs access to, and should have a
``libvirt`` storage pool available for the VM's volumes. The steps needed for
the :ref:`seed <deployment-seed-host-configure>` and the
:ref:`seed hypervisor <deployment-seed-hypervisor-host-configure>` can be found
above.

To provision the infra VMs::

    (kayobe) $ kayobe infra vm provision

When this command has completed the infra VMs should be active and accessible
via SSH. Kayobe will update the Ansible inventory with the IP address of the
VM.

Host Configuration
------------------

To configure the infra VM host OS::

    (kayobe) $ kayobe infra vm host configure

.. note::

   If the infra VM host uses disks that have been in use in a previous
   installation, it may be necessary to wipe partition and LVM data from those
   disks. To wipe all disks that are not mounted during host configuration::

       (kayobe) $ kayobe infra vm host configure --wipe-disks

.. seealso::

   Information on configuration of hosts is available :ref:`here
   <configuration-hosts>`.

Using Hooks to deploy services on the VMs
-----------------------------------------

A no-op service deployment command is provided to perform additional
configuration. The intention is for users to define :ref:`hooks to custom
playbooks <custom-playbooks-hooks>` that carry out any further configuration or
service deployment necessary.

To trigger the hooks::

    (kayobe) $ kayobe infra vm service deploy

Example
^^^^^^^

In this example we have an infra VM host called ``dns01`` that provides DNS
services. The host could be added to a ``dns-servers`` group in the inventory:

.. code-block:: ini
   :caption: ``$KAYOBE_CONFIG_PATH/inventory/infra-vms``

   [dns-servers]
   dns01

   [infra-vms:children]
   dns-servers

We have a custom playbook targeting the ``dns-servers`` group that sets up
the DNS server:

.. code-block:: yaml
   :caption: ``$KAYOBE_CONFIG_PATH/ansible/dns-server.yml``

   ---
   - name: Deploy DNS servers
     hosts: dns-servers
     tasks:
       - name: Install bind packages
         package:
           name:
             - bind
             - bind-utils
         become: true

Finally, we add a symlink to set up the playbook as a hook for the ``kayobe
infra vm service deploy`` command::

    (kayobe) $ mkdir -p ${KAYOBE_CONFIG_PATH}/hooks/infra-vm-host-configure/post.d
    (kayobe) $ cd ${KAYOBE_CONFIG_PATH}/hooks/infra-vm-host-configure/post.d
    (kayobe) $ ln -s ../../../ansible/dns-server.yml 50-dns-server.yml

Overcloud
=========

etc/kayobe/infra-vms.yml (new file, 146 lines)

@@ -0,0 +1,146 @@
---
###############################################################################
# Infrastructure VM configuration.
# Name of the infra VM.
#infra_vm_name:
# Memory in MB.
#infra_vm_memory_mb:
# Number of vCPUs.
#infra_vm_vcpus:
# List of volumes.
#infra_vm_volumes:
# Root volume.
#infra_vm_root_volume:
# Data volume.
#infra_vm_data_volume:
# Name of the storage pool for the infra VM volumes.
#infra_vm_pool:
# Capacity of the infra VM root volume.
#infra_vm_root_capacity:
# Format of the infra VM root volume.
#infra_vm_root_format:
# Base image for the infra VM root volume. Default is
# "https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img"
# when os_distribution is "ubuntu", or
# "https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20210210.0.x86_64.qcow2"
# otherwise.
#infra_vm_root_image:
# Capacity of the infra VM data volume.
#infra_vm_data_capacity:
# Format of the infra VM data volume.
#infra_vm_data_format:
# List of network interfaces to attach to the infra VM.
#infra_vm_interfaces:
# Hypervisor that the VM runs on.
#infra_vm_hypervisor:
# Customise ansible_ssh_extra_args for the test that checks SSH connectivity
# after provisioning. Defaults to disabling ssh host key checking.
#infra_vm_wait_connection_ssh_extra_args:
# OS family. Needed for config drive generation.
#infra_vm_os_family:
###############################################################################
# Infrastructure VM node configuration.
# User with which to access the infrastructure vm via SSH during bootstrap, in
# order to setup the Kayobe user account.
#infra_vm_bootstrap_user:
###############################################################################
# Infrastructure VM network interface configuration.
# List of networks to which infrastructure vm nodes are attached.
#infra_vm_network_interfaces:
# List of default networks to which infrastructure vm nodes are attached.
#infra_vm_default_network_interfaces:
# List of extra networks to which infrastructure vm nodes are attached.
#infra_vm_extra_network_interfaces:
###############################################################################
# Infrastructure VM node software RAID configuration.
# List of software RAID arrays. See mrlesmithjr.mdadm role for format.
#infra_vm_mdadm_arrays:
###############################################################################
# Infrastructure VM node encryption configuration.
# List of block devices to encrypt. See stackhpc.luks role for format.
#infra_vm_luks_devices:
###############################################################################
# Infrastructure VM node LVM configuration.
# List of infrastructure vm volume groups. See mrlesmithjr.manage-lvm role for
# format.
#infra_vm_lvm_groups:
# Default list of infrastructure vm volume groups. See mrlesmithjr.manage-lvm
# role for format.
#infra_vm_lvm_groups_default:
# Additional list of infrastructure vm volume groups. See mrlesmithjr.manage-lvm
# role for format.
#infra_vm_lvm_groups_extra:
# Whether a 'data' LVM volume group should exist on the infrastructure vm. By
# default this contains a 'docker-volumes' logical volume for Docker volume
# storage. It will also be used for Docker container and image storage if
# 'docker_storage_driver' is set to 'devicemapper'. Default is true if
# 'docker_storage_driver' is set to 'devicemapper', or false otherwise.
#infra_vm_lvm_group_data_enabled:
# Infrastructure VM LVM volume group for data. See mrlesmithjr.manage-lvm role
# for format.
#infra_vm_lvm_group_data:
# List of disks for use by infrastructure vm LVM data volume group. Default to
# an invalid value to require configuration.
#infra_vm_lvm_group_data_disks:
# List of LVM logical volumes for the data volume group.
#infra_vm_lvm_group_data_lvs:
# Docker volumes LVM backing volume.
#infra_vm_lvm_group_data_lv_docker_volumes:
# Size of docker volumes LVM backing volume.
#infra_vm_lvm_group_data_lv_docker_volumes_size:
# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
#infra_vm_lvm_group_data_lv_docker_volumes_fs:
###############################################################################
# Infrastructure VM node sysctl configuration.
# Dict of sysctl parameters to set.
#infra_vm_sysctl_parameters:
###############################################################################
# Infrastructure VM node user configuration.
# List of users to create. This should be in a format accepted by the
# singleplatform-eng.users role.
#infra_vm_users:
###############################################################################
# Dummy variable to allow Ansible to accept this file.
workaround_ansible_issue_8743: yes
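
To change a default for all infra VMs, uncomment the relevant variable above
and set a value. A brief sketch (values are illustrative only)::

    # Name of the storage pool for the infra VM volumes.
    infra_vm_pool: infra-vm-pool

    # Capacity of the infra VM data volume.
    infra_vm_data_capacity: 200G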

@@ -14,6 +14,16 @@
 # Build container images on the seed by default.
 seed
 
+###############################################################################
+# Infra VM groups.
+
+[hypervisors:children]
+# Group that contains all hypervisors used for infra VMs
+seed-hypervisor
+
+[infra-vms]
+# Empty group to provide declaration of infra-vms group.
+
 ###############################################################################
 # Overcloud groups.

@@ -823,6 +823,171 @@ class SeedDeploymentImageBuild(KayobeAnsibleMixin, VaultMixin, Command):
                                  extra_vars=extra_vars)


class InfraVMProvision(KayobeAnsibleMixin, VaultMixin, Command):
    """Provisions infra virtual machines.

    * Allocate IP addresses for all configured networks.
    * Provision a virtual machine using libvirt.
    """

    def take_action(self, parsed_args):
        self.app.LOG.debug("Provisioning infra VMs")
        self.run_kayobe_playbook(parsed_args,
                                 _get_playbook_path("ip-allocation"),
                                 limit="infra-vms")

        limit_arg = utils.intersect_limits(parsed_args.limit, "infra-vms")
        # We want the limit to affect one play only. To do this we use a
        # variable to override the hosts list instead of using limit.
        extra_vars = {
            "infra_vm_limit": limit_arg
        }

        self.run_kayobe_playbook(parsed_args,
                                 _get_playbook_path("infra-vm-provision"),
                                 ignore_limit=True, extra_vars=extra_vars)


class InfraVMDeprovision(KayobeAnsibleMixin, VaultMixin, Command):
    """Deprovisions infra virtual machines.

    This will destroy all infra VMs and all associated volumes.
    """

    def take_action(self, parsed_args):
        self.app.LOG.debug("Deprovisioning infra VMs")
        # We want the limit to affect one play only. To do this we use a
        # variable to override the hosts list instead of using limit.
        limit_arg = utils.intersect_limits(parsed_args.limit, "infra-vms")
        extra_vars = {
            "infra_vm_limit": limit_arg
        }
        self.run_kayobe_playbook(parsed_args,
                                 _get_playbook_path("infra-vm-deprovision"),
                                 ignore_limit=True, extra_vars=extra_vars)


class InfraVMHostConfigure(KayobeAnsibleMixin, VaultMixin, Command):
    """Configure the infra VMs host OS and services.

    * Allocate IP addresses for all configured networks.
    * Add the host to SSH known hosts.
    * Configure a user account for use by kayobe for SSH access.
    * Configure package repos.
    * Configure a PyPI mirror.
    * Optionally, create a virtualenv for remote target hosts.
    * Optionally, wipe unmounted disk partitions (--wipe-disks).
    * Configure user accounts, group associations, and authorised SSH keys.
    * Disable SELinux.
    * Configure the host's network interfaces.
    * Set sysctl parameters.
    * Disable bootstrap interface configuration.
    * Configure timezone.
    * Optionally, configure software RAID arrays.
    * Optionally, configure encryption.
    * Configure LVM volumes.
    """

    def get_parser(self, prog_name):
        parser = super(InfraVMHostConfigure, self).get_parser(prog_name)
        group = parser.add_argument_group("Host Configuration")
        group.add_argument("--wipe-disks", action='store_true',
                           help="wipe partition and LVM data from all disks "
                                "that are not mounted. Warning: this can "
                                "result in the loss of data")
        return parser

    def take_action(self, parsed_args):
        self.app.LOG.debug("Configuring Infra VMs host OS")
        # Allocate IP addresses.
        playbooks = _build_playbook_list("ip-allocation")
        self.run_kayobe_playbooks(parsed_args, playbooks, limit="infra-vms")

        # Kayobe playbooks.
        playbooks = _build_playbook_list(
            "ssh-known-host", "kayobe-ansible-user",
            "dnf", "pip", "kayobe-target-venv")
        if parsed_args.wipe_disks:
            playbooks += _build_playbook_list("wipe-disks")
        playbooks += _build_playbook_list(
            "users", "dev-tools", "disable-selinux", "network",
            "sysctl", "disable-glean", "disable-cloud-init", "time",
            "mdadm", "luks", "lvm", "docker-devicemapper", "docker")
        self.run_kayobe_playbooks(parsed_args, playbooks, limit="infra-vms")


class InfraVMHostPackageUpdate(KayobeAnsibleMixin, VaultMixin, Command):
    """Update packages on the infra VMs."""

    def get_parser(self, prog_name):
        parser = super(InfraVMHostPackageUpdate, self).get_parser(prog_name)
        group = parser.add_argument_group("Host Package Updates")
        group.add_argument("--packages", required=True,
                           help="List of packages to update. Use '*' to "
                                "update all packages.")
        group.add_argument("--security", action='store_true',
                           help="Only install updates that have been marked "
                                "security related.")
        return parser

    def take_action(self, parsed_args):
        self.app.LOG.debug("Updating infra vm host packages")
        extra_vars = {
            "host_package_update_packages": parsed_args.packages,
            "host_package_update_security": parsed_args.security,
        }
        playbooks = _build_playbook_list("host-package-update")
        self.run_kayobe_playbooks(parsed_args, playbooks, limit="infra-vms",
                                  extra_vars=extra_vars)


class InfraVMHostCommandRun(KayobeAnsibleMixin, VaultMixin, Command):
    """Run command on the infra VMs."""

    def get_parser(self, prog_name):
        parser = super(InfraVMHostCommandRun, self).get_parser(prog_name)
        group = parser.add_argument_group("Host Command Run")
        group.add_argument("--command", required=True,
                           help="Command to run (required).")
        group.add_argument("--show-output", action='store_true',
                           help="Show command output")
        return parser

    def take_action(self, parsed_args):
        self.app.LOG.debug("Run command on infra VM hosts")
        extra_vars = {
            "host_command_to_run": utils.escape_jinja(parsed_args.command),
            "show_output": parsed_args.show_output}
        playbooks = _build_playbook_list("host-command-run")
        self.run_kayobe_playbooks(parsed_args, playbooks, limit="infra-vms",
                                  extra_vars=extra_vars)


class InfraVMHostUpgrade(KayobeAnsibleMixin, VaultMixin, Command):
    """Upgrade the infra VM host services.

    Performs the changes necessary to make the host services suitable for the
    configured OpenStack release.
    """

    def take_action(self, parsed_args):
        self.app.LOG.debug("Upgrading the infra-vm host services")
        playbooks = _build_playbook_list("kayobe-target-venv")
        self.run_kayobe_playbooks(parsed_args, playbooks,
                                  limit="infra-vms")


class InfraVMServiceDeploy(KayobeAnsibleMixin, VaultMixin, Command):
    """Run hooks for infrastructure services."""

    def take_action(self, parsed_args):
        self.app.LOG.debug("Running no-op Infra VM service deploy")


class OvercloudInventoryDiscover(KayobeAnsibleMixin, VaultMixin, Command):
    """Discover the overcloud inventory from the seed's Ironic service.

@@ -919,6 +919,189 @@ class TestCase(unittest.TestCase):
        ]
        self.assertEqual(expected_calls, mock_kolla_run.call_args_list)

    @mock.patch.object(commands.KayobeAnsibleMixin,
                       "run_kayobe_playbook")
    def test_infra_vm_provision(self, mock_run):
        command = commands.InfraVMProvision(TestApp(), [])
        parser = command.get_parser("test")
        parsed_args = parser.parse_args([])
        result = command.run(parsed_args)
        self.assertEqual(0, result)
        expected_calls = [
            mock.call(
                mock.ANY,
                utils.get_data_files_path(
                    "ansible", "ip-allocation.yml"),
                limit="infra-vms"
            ),
            mock.call(
                mock.ANY,
                utils.get_data_files_path(
                    "ansible", "infra-vm-provision.yml"),
                ignore_limit=True,
                extra_vars={'infra_vm_limit': 'infra-vms'}
            ),
        ]
        self.assertEqual(expected_calls, mock_run.call_args_list)

    @mock.patch.object(commands.KayobeAnsibleMixin,
                       "run_kayobe_playbook")
    def test_infra_vm_deprovision(self, mock_run):
        command = commands.InfraVMDeprovision(TestApp(), [])
        parser = command.get_parser("test")
        parsed_args = parser.parse_args([])
        result = command.run(parsed_args)
        self.assertEqual(0, result)
        expected_calls = [
            mock.call(
                mock.ANY,
                utils.get_data_files_path(
                    "ansible", "infra-vm-deprovision.yml"),
                ignore_limit=True,
                extra_vars={'infra_vm_limit': 'infra-vms'}
            ),
        ]
        self.assertEqual(expected_calls, mock_run.call_args_list)

    @mock.patch.object(commands.KayobeAnsibleMixin,
                       "run_kayobe_playbooks")
    def test_infra_vm_host_configure(self, mock_run):
        command = commands.InfraVMHostConfigure(TestApp(), [])
        parser = command.get_parser("test")
        parsed_args = parser.parse_args([])
        result = command.run(parsed_args)
        self.assertEqual(0, result)
        expected_calls = [
            mock.call(
                mock.ANY,
                [utils.get_data_files_path("ansible", "ip-allocation.yml")],
                limit="infra-vms",
            ),
            mock.call(
                mock.ANY,
                [
                    utils.get_data_files_path("ansible", "ssh-known-host.yml"),
                    utils.get_data_files_path(
                        "ansible", "kayobe-ansible-user.yml"),
                    utils.get_data_files_path("ansible", "dnf.yml"),
                    utils.get_data_files_path("ansible", "pip.yml"),
                    utils.get_data_files_path(
                        "ansible", "kayobe-target-venv.yml"),
                    utils.get_data_files_path("ansible", "users.yml"),
                    utils.get_data_files_path("ansible", "dev-tools.yml"),
                    utils.get_data_files_path(
                        "ansible", "disable-selinux.yml"),
                    utils.get_data_files_path("ansible", "network.yml"),
                    utils.get_data_files_path("ansible", "sysctl.yml"),
                    utils.get_data_files_path("ansible", "disable-glean.yml"),
                    utils.get_data_files_path(
                        "ansible", "disable-cloud-init.yml"),
                    utils.get_data_files_path("ansible", "time.yml"),
                    utils.get_data_files_path("ansible", "mdadm.yml"),
                    utils.get_data_files_path("ansible", "luks.yml"),
                    utils.get_data_files_path("ansible", "lvm.yml"),
                    utils.get_data_files_path("ansible",
                                              "docker-devicemapper.yml"),
                    utils.get_data_files_path("ansible", "docker.yml"),
                ],
                limit="infra-vms",
            ),
        ]
        self.assertEqual(expected_calls, mock_run.call_args_list)

    @mock.patch.object(commands.KayobeAnsibleMixin,
                       "run_kayobe_playbooks")
    def test_infra_vm_host_upgrade(self, mock_run):
        command = commands.InfraVMHostUpgrade(TestApp(), [])
        parser = command.get_parser("test")
        parsed_args = parser.parse_args([])
        result = command.run(parsed_args)
        self.assertEqual(0, result)
        expected_calls = [
            mock.call(
                mock.ANY,
                [
                    utils.get_data_files_path(
                        "ansible", "kayobe-target-venv.yml"),
                ],
                limit="infra-vms",
            ),
        ]
        self.assertEqual(expected_calls, mock_run.call_args_list)

    @mock.patch.object(commands.KayobeAnsibleMixin,
                       "run_kayobe_playbooks")
    def test_infra_vm_host_command_run(self, mock_run):
        command = commands.InfraVMHostCommandRun(TestApp(), [])
        parser = command.get_parser("test")
        parsed_args = parser.parse_args(["--command", "ls -a",
                                         "--show-output"])
        result = command.run(parsed_args)
        self.assertEqual(0, result)
        expected_calls = [
            mock.call(
                mock.ANY,
                [
                    utils.get_data_files_path("ansible",
                                              "host-command-run.yml"),
                ],
                limit="infra-vms",
                extra_vars={
                    "host_command_to_run": utils.escape_jinja("ls -a"),
                    "show_output": True}
            ),
        ]
        self.assertEqual(expected_calls, mock_run.call_args_list)

    @mock.patch.object(commands.KayobeAnsibleMixin,
                       "run_kayobe_playbooks")
    def test_infra_vm_host_package_update_all(self, mock_run):
        command = commands.InfraVMHostPackageUpdate(TestApp(), [])
        parser = command.get_parser("test")
        parsed_args = parser.parse_args(["--packages", "*"])
        result = command.run(parsed_args)
        self.assertEqual(0, result)
        expected_calls = [
            mock.call(
                mock.ANY,
                [
                    utils.get_data_files_path(
                        "ansible", "host-package-update.yml"),
                ],
                limit="infra-vms",
                extra_vars={
                    "host_package_update_packages": "*",
                    "host_package_update_security": False,
                },
            ),
        ]
        self.assertEqual(expected_calls, mock_run.call_args_list)

    @mock.patch.object(commands.KayobeAnsibleMixin,
                       "run_kayobe_playbook")
    def test_infra_vm_service_deploy(self, mock_run):
        command = commands.InfraVMServiceDeploy(TestApp(), [])
        parser = command.get_parser("test")
        parsed_args = parser.parse_args([])
        result = command.run(parsed_args)
        self.assertEqual(0, result)
        expected_calls = []
        self.assertEqual(expected_calls, mock_run.call_args_list)

    @mock.patch.object(commands.KayobeAnsibleMixin,
                       "run_kayobe_playbooks")
    @mock.patch.object(commands.KayobeAnsibleMixin,

@@ -0,0 +1,8 @@
---
features:
  - |
    Adds support for deploying infrastructure VMs on the seed hypervisor.
    These can be used to provide supplementary services that do not run well
    within a containerised environment or are dependencies of the control
    plane. See `story 2008741
    <https://storyboard.openstack.org/#!/story/2008741>`__ for details.

@@ -94,6 +94,13 @@ kayobe.cli=
     seed_service_upgrade = kayobe.cli.commands:SeedServiceUpgrade
     seed_vm_deprovision = kayobe.cli.commands:SeedVMDeprovision
     seed_vm_provision = kayobe.cli.commands:SeedVMProvision
+    infra_vm_deprovision = kayobe.cli.commands:InfraVMDeprovision
+    infra_vm_provision = kayobe.cli.commands:InfraVMProvision
+    infra_vm_host_configure = kayobe.cli.commands:InfraVMHostConfigure
+    infra_vm_host_upgrade = kayobe.cli.commands:InfraVMHostUpgrade
+    infra_vm_host_command_run = kayobe.cli.commands:InfraVMHostCommandRun
+    infra_vm_host_package_update = kayobe.cli.commands:InfraVMHostPackageUpdate
+    infra_vm_service_deploy = kayobe.cli.commands:InfraVMServiceDeploy
 
 kayobe.cli.baremetal_compute_inspect =
     hooks = kayobe.cli.commands:HookDispatcher
@@ -205,3 +212,17 @@ kayobe.cli.seed_vm_deprovision =
     hooks = kayobe.cli.commands:HookDispatcher
 kayobe.cli.seed_vm_provision =
     hooks = kayobe.cli.commands:HookDispatcher
+kayobe.cli.infra_vm_deprovision =
+    hooks = kayobe.cli.commands:HookDispatcher
+kayobe.cli.infra_vm_provision =
+    hooks = kayobe.cli.commands:HookDispatcher
+kayobe.cli.infra_vm_host_configure =
+    hooks = kayobe.cli.commands:HookDispatcher
+kayobe.cli.infra_vm_host_upgrade =
+    hooks = kayobe.cli.commands:HookDispatcher
+kayobe.cli.infra_vm_host_command_run =
+    hooks = kayobe.cli.commands:HookDispatcher
+kayobe.cli.infra_vm_host_package_update =
+    hooks = kayobe.cli.commands:HookDispatcher
+kayobe.cli.infra_vm_service_deploy =
+    hooks = kayobe.cli.commands:HookDispatcher