Merge "Add a template for vCenter-related devops environment"

In order to optimize lab resources, we can use a YAML template for the devops environment. Typically we deploy an environment with 3 controllers and several other nodes that carry VMware-related roles. Controller nodes use more resources than the other nodes, so we can set different memory sizes and CPU counts for different node groups to optimize resource utilization.

Jenkins, 2016-09-08 16:48:03 +00:00 (committed by Gerrit Code Review)
commit 7e09b176a2
27 changed files with 233 additions and 0 deletions

View File

@@ -0,0 +1,207 @@
---
aliases:

  dynamic_address_pool:
    - &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24

  default_interface_model:
    - &interface_model !os_env INTERFACE_MODEL, e1000

  rack-01-slave-interfaces: &rack-01-slave-interfaces
    - label: eth0
      l2_network_device: admin  # Libvirt bridge name. It is *NOT* a Nailgun network
      interface_model: *interface_model
    - label: eth1
      l2_network_device: public
      interface_model: *interface_model
    - label: eth2
      l2_network_device: management
      interface_model: *interface_model
    - label: eth3
      l2_network_device: private
      interface_model: *interface_model
    - label: eth4
      l2_network_device: storage
      interface_model: *interface_model

  rack-01-slave-network_config: &rack-01-slave-network_config
    eth0:
      networks:
        - fuelweb_admin
    eth1:
      networks:
        - public
    eth2:
      networks:
        - management
    eth3:
      networks:
        - private
    eth4:
      networks:
        - storage

  rack-01-slave-node-params: &rack-01-slave-node-params
    vcpu: !os_env SLAVE_NODE_CPU, 4
    memory: !os_env SLAVE_NODE_MEMORY, 8192
    boot:
      - network
      - hd
    volumes:
      - name: system
        capacity: !os_env NODE_VOLUME_SIZE, 55
        format: qcow2
      - name: cinder
        capacity: !os_env NODE_VOLUME_SIZE, 55
        format: qcow2
      - name: swift
        capacity: !os_env NODE_VOLUME_SIZE, 55
        format: qcow2
    interfaces: *rack-01-slave-interfaces
    network_config: *rack-01-slave-network_config

  rack-02-slave-node-params: &rack-02-slave-node-params
    vcpu: !os_env SLAVE_NODE_CPU, 2
    memory: !os_env SLAVE_NODE_MEMORY, 3072
    boot:
      - network
      - hd
    volumes:
      - name: system
        capacity: !os_env NODE_VOLUME_SIZE, 55
        format: qcow2
      - name: cinder
        capacity: !os_env NODE_VOLUME_SIZE, 55
        format: qcow2
      - name: swift
        capacity: !os_env NODE_VOLUME_SIZE, 55
        format: qcow2
    interfaces: *rack-01-slave-interfaces
    network_config: *rack-01-slave-network_config

env_name: !os_env ENV_NAME

address_pools:
  # Network pools used by the environment
  fuelweb_admin-pool01:
    net: *pool_default
    params:
      tag: 0
  public-pool01:
    net: *pool_default
    params:
      tag: 0
  storage-pool01:
    net: *pool_default
    params:
      tag: 101
  management-pool01:
    net: *pool_default
    params:
      tag: 102
  private-pool01:
    net: *pool_default
    params:
      tag: 103

groups:
  - name: cat
    driver:
      name: devops.driver.libvirt.libvirt_driver
      params:
        connection_string: !os_env CONNECTION_STRING, qemu:///system
        storage_pool_name: !os_env STORAGE_POOL_NAME, default
        stp: True
        hpet: False
        use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true

    network_pools:  # Address pools for OpenStack networks.
                    # Actual names should be used for keys
                    # (the same as in Nailgun, for example)
      fuelweb_admin: fuelweb_admin-pool01
      public: public-pool01
      storage: storage-pool01
      management: management-pool01
      private: private-pool01

    l2_network_devices:  # Libvirt bridges. They are *NOT* Nailgun networks
      admin:
        address_pool: fuelweb_admin-pool01
        dhcp: false
        forward:
          mode: nat
      public:
        address_pool: public-pool01
        dhcp: false
        forward:
          mode: nat
      storage:
        address_pool: storage-pool01
        dhcp: false
      management:
        address_pool: management-pool01
        dhcp: false
      private:
        address_pool: private-pool01
        dhcp: false

    nodes:
      - name: admin        # Custom name of the VM for the Fuel admin node
        role: fuel_master  # Fixed role for Fuel master node properties
        params:
          vcpu: !os_env ADMIN_NODE_CPU, 2
          memory: !os_env ADMIN_NODE_MEMORY, 3072
          boot:
            - hd
            - cdrom  # for boot from usb - without 'cdrom'
          volumes:
            - name: system
              capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 80
              format: qcow2
            - name: iso
              source_image: !os_env ISO_PATH  # if 'source_image' is set, the volume capacity is calculated from its size
              format: raw
              device: cdrom  # for boot from usb - 'disk'
              bus: ide       # for boot from usb - 'usb'
          interfaces:
            - label: eth0
              l2_network_device: admin  # Libvirt bridge name. It is *NOT* a Nailgun network
              interface_model: *interface_model
          network_config:
            eth0:
              networks:
                - fuelweb_admin

      - name: slave-01
        role: fuel_slave
        params: *rack-01-slave-node-params

      - name: slave-02
        role: fuel_slave
        params: *rack-01-slave-node-params

      - name: slave-03
        role: fuel_slave
        params: *rack-01-slave-node-params

      - name: slave-04
        role: fuel_slave
        params: *rack-02-slave-node-params

      - name: slave-05
        role: fuel_slave
        params: *rack-02-slave-node-params

      - name: slave-06
        role: fuel_slave
        params: *rack-02-slave-node-params

      - name: slave-07
        role: fuel_slave
        params: *rack-02-slave-node-params

      - name: slave-08
        role: fuel_slave
        params: *rack-02-slave-node-params

      - name: slave-09
        role: fuel_slave
        params: *rack-02-slave-node-params
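
The two parameter anchors above implement the sizing split described in the commit message: rack-01-slave-node-params gives the controller candidates (slave-01..slave-03) 4 vCPUs and 8192 MB by default, while rack-02-slave-node-params gives the remaining nodes 2 vCPUs and 3072 MB. Each value falls back to its default only when the corresponding environment variable (SLAVE_NODE_CPU, SLAVE_NODE_MEMORY, NODE_VOLUME_SIZE, and so on) is not exported; note that both groups read the same variable names, so an explicit override applies to both. Below is a minimal PyYAML sketch of how an `!os_env VAR, default` tag can be resolved. It is an illustration only, not the fuel-devops implementation, and `os_env_constructor` is a hypothetical helper name:

import os
import yaml


def os_env_constructor(loader, node):
    # The tagged scalar looks like "SLAVE_NODE_MEMORY, 8192": an environment
    # variable name plus an optional default used when it is not exported.
    parts = [part.strip() for part in loader.construct_scalar(node).split(',', 1)]
    return os.environ.get(parts[0], parts[1] if len(parts) > 1 else None)


yaml.SafeLoader.add_constructor('!os_env', os_env_constructor)

# With SLAVE_NODE_MEMORY unset, the template default is returned (as a string).
print(yaml.safe_load('memory: !os_env SLAVE_NODE_MEMORY, 8192')['memory'])

A real loader would presumably also cast numeric values to integers; this sketch keeps them as strings.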

View File

@@ -2,6 +2,7 @@
template:
  name: 3 controllers, 1 compute + cinder, 1 compute-vmware on Neutron/VLAN with DVS plugin
  slaves: 6
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_add_cindervmware
    release: ubuntu
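
Each vCenter test template in this change gains the same single line, `devops_settings: !include devops_configs/vcenter_ha_default.yaml`, so the node layout and sizing are declared once in the shared file and reused by every test below. As a rough sketch of what an `!include` tag does when such templates are parsed (assuming a PyYAML-based loader; `include_constructor` is a hypothetical name, and the real tag handling is provided by the test framework's own YAML loader):

import os
import yaml


def include_constructor(loader, node):
    # Resolve the included file relative to the document being parsed and
    # load it, so its content becomes the value of the including key.
    base_dir = os.path.dirname(getattr(loader, 'name', ''))
    with open(os.path.join(base_dir, loader.construct_scalar(node))) as included:
        return yaml.safe_load(included)


yaml.SafeLoader.add_constructor('!include', include_constructor)

Loaded this way, a test template would expose the whole environment definition under its template/devops_settings keys instead of duplicating the node descriptions in every test config.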

View File

@@ -2,6 +2,7 @@
template:
  name: 1 controller, 1 compute + cinder, 1 compute-vmware on Neutron/VLAN with DVS plugin
  slaves: 3
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_add_computevmware
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 1 controller + mongo, 1 compute + cinder, 1 cinder-vmware, 1 compute-vmware on Neutron/VLAN with DVS plugin
  slaves: 4
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_ceilometer_and_computevmware
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 1 controller, 1 compute, 1 compute-vmware, 2 ceph-osd on Neutron/VLAN with DVS plugin
  slaves: 5
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_ceph_and_computevmware
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 1 controller, 1 cinder-vmware, 1 compute-vmware on Neutron/VLAN with DVS plugin
  slaves: 3
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_cindervmdk_and_computevmware
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 1 controller, 2 compute-vmware on Neutron/VLAN with DVS plugin
  slaves: 3
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_computevmware
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 3 controllers, 1 compute + cinder, 1 cinder-vmware, 1 compute-vmware on Neutron/VLAN with DVS plugin
  slaves: 6
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_delete_cindervmware
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 1 controller, 1 compute + cinder, 1 compute-vmware on Neutron/VLAN with DVS plugin
  slaves: 3
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_delete_computevmware
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 3 controllers, 1 compute + cinder, 1 cinder-vmware, 1 compute-vmware on Neutron/VLAN with DVS plugin
  slaves: 6
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_dvs_bvt
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 5 nodes on Neutron/VLAN with DVS plugin (3 controllers, 1 compute + cinder, 1 cinder-vmware)
  slaves: 5
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_dvs_failover
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 1 Controller on Neutron/VLAN with DVS plugin
  slaves: 1
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_dvs_smoke
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 1 Controller on Neutron/VLAN with DVS plugin and Glance
  slaves: 1
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_glance_backend
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 1 controller, 1 compute-vmware on Neutron/VLAN with DVS plugin
  slaves: 2
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_glance_backend_and_computevmware
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 3 controller + mongo, 1 compute + cinder, 1 cinder-vmware on Neutron/VLAN with DVS plugin
  slaves: 5
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_ha_ceilometer
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 3 controller, 1 compute, 2 ceph-osd on Neutron/VLAN with DVS plugin
  slaves: 6
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_ha_ceph
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 3 controller, 1 ceph-osd + cinder-vmware, 1 ceph-osd + cinder on Neutron/VLAN with DVS plugin
  slaves: 5
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_ha_cinder_and_ceph
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 3 controllers, 1 compute + cinder, 1 cinder-vmware on Neutron/VLAN with DVS plugin
  slaves: 5
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_ha_multiple_clusters
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 1 controller + cinder-vmware + cinder + mongo, 1 compute, 1 compute-vmware on Neutron/VLAN with DVS plugin
  slaves: 3
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_multiroles_ceilometer
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 1 controller + cinder-vmware + ceph, 1 compute + cinder-vmware + ceph on Neutron/VLAN with DVS plugin
  slaves: 2
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_multiroles_cindervmdk_and_ceph
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 1 controller + cinder-vmware + cinder, 1 compute + cinder-vmware + cinder on Neutron/VLAN with DVS plugin
  slaves: 2
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_multiroles_cindervmdk_and_cinder
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 1 controller, 1 compute-vmware + cinder-vmware on Neutron/VLAN with DVS plugin
  slaves: 2
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_multiroles_computevmware_cindervmware
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 1 controller, 1 cinder-vmware on Neutron/VLAN with DVS plugin
  slaves: 2
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_reboot_cindervmware
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 1 controller, 1 compute + cinder, 2 cinder-vmware on Neutron/VLAN with DVS plugin
  slaves: 4
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_shutdown_cindervmware
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 1 controller, 1 cinder-vmware, 1 compute-vmware on Neutron/VLAN with DVS plugin
  slaves: 3
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_roles
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 1 controller, 1 cinder-vmware, 1 compute-vmware on Neutron/VLAN with DVS plugin
  slaves: 3
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_roles_local_ds
    release: ubuntu

View File

@@ -2,6 +2,7 @@
template:
  name: 1 controller, 1 cinder-vmware, 1 compute-vmware on Neutron/VLAN with DVS plugin
  slaves: 3
  devops_settings: !include devops_configs/vcenter_ha_default.yaml
  cluster_template:
    name: vcenter_roles_pub_ip
    release: ubuntu