Sync fixtures YAML generator with master branch

Let's use the same approach as on the master branch, with fake nodes.

Change-Id: I67509516b713c1b811bd5ec316447e8310728a27
This commit is contained in:
Aleksandr Didenko 2016-08-24 20:01:22 +02:00 committed by Dmitry Ilyin
parent e3bb0df563
commit 6d1087fd6f
37 changed files with 53524 additions and 23429 deletions

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

1
utils/fixtures/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
*.json

View File

@ -0,0 +1,737 @@
[
<% nodes.each do |node| %>
{
"pk": <%= node['pk'] %>,
"model": "nailgun.node",
"fields": {
"status": "discover",
"name": "fnode-<%= node['id'] %>",
"hostname": "fnode-<%= node['id'] %>",
"ip": "<%= node['ip'] %>",
"online": true,
"labels": {},
"pending_addition": false,
"platform_name": "X9DRW",
"mac": "<%= node['main_mac'] %>",
"meta": {
"cpu": {
"real": 2,
"total": 24,
"spec": [
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
},
{
"model": "Intel(R) Xeon(R) CPU E5-2620 0 @ 2.00GHz",
"frequency": 2001
}
]
},
"interfaces": [
{
"bus_info": "0000:00:03.0",
"current_speed": 1000,
"driver": "e1000",
"interface_properties": {
"numa_node": null,
"pci_id": "8086:100e",
"sriov": {
"available": false,
"pci_id": "",
"sriov_totalvfs": 0
}
},
"ip": "<%= node['ip'] %>",
"mac": "<%= node['main_mac'] %>",
"max_speed": 1000,
"name": "enp0s3",
"netmask": "255.255.255.0",
"offloading_modes": [
{
"name": "rx-all",
"state": null,
"sub": []
},
{
"name": "rx-fcs",
"state": null,
"sub": []
},
{
"name": "tx-nocache-copy",
"state": null,
"sub": []
},
{
"name": "rx-vlan-offload",
"state": null,
"sub": []
},
{
"name": "generic-receive-offload",
"state": null,
"sub": []
},
{
"name": "generic-segmentation-offload",
"state": null,
"sub": []
},
{
"name": "tcp-segmentation-offload",
"state": null,
"sub": [
{
"name": "tx-tcp-segmentation",
"state": null,
"sub": []
}
]
},
{
"name": "scatter-gather",
"state": null,
"sub": [
{
"name": "tx-scatter-gather",
"state": null,
"sub": []
}
]
},
{
"name": "tx-checksumming",
"state": null,
"sub": [
{
"name": "tx-checksum-ip-generic",
"state": null,
"sub": []
}
]
},
{
"name": "rx-checksumming",
"state": null,
"sub": []
}
],
"pxe": false,
"state": "up"
},
{
"bus_info": "0000:00:04.0",
"current_speed": 1000,
"driver": "e1000",
"interface_properties": {
"numa_node": null,
"pci_id": "8086:100e",
"sriov": {
"available": false,
"pci_id": "",
"sriov_totalvfs": 0
}
},
"mac": "<%= node['nic2_mac'] %>",
"max_speed": 1000,
"name": "enp0s4",
"offloading_modes": [
{
"name": "rx-all",
"state": null,
"sub": []
},
{
"name": "rx-fcs",
"state": null,
"sub": []
},
{
"name": "tx-nocache-copy",
"state": null,
"sub": []
},
{
"name": "rx-vlan-offload",
"state": null,
"sub": []
},
{
"name": "generic-receive-offload",
"state": null,
"sub": []
},
{
"name": "generic-segmentation-offload",
"state": null,
"sub": []
},
{
"name": "tcp-segmentation-offload",
"state": null,
"sub": [
{
"name": "tx-tcp-segmentation",
"state": null,
"sub": []
}
]
},
{
"name": "scatter-gather",
"state": null,
"sub": [
{
"name": "tx-scatter-gather",
"state": null,
"sub": []
}
]
},
{
"name": "tx-checksumming",
"state": null,
"sub": [
{
"name": "tx-checksum-ip-generic",
"state": null,
"sub": []
}
]
},
{
"name": "rx-checksumming",
"state": null,
"sub": []
}
],
"pxe": false,
"state": "down"
},
{
"bus_info": "0000:00:05.0",
"current_speed": 1000,
"driver": "e1000",
"interface_properties": {
"numa_node": null,
"pci_id": "8086:100e",
"sriov": {
"available": false,
"pci_id": "",
"sriov_totalvfs": 0
}
},
"mac": "<%= node['nic3_mac'] %>",
"max_speed": 1000,
"name": "enp0s5",
"offloading_modes": [
{
"name": "rx-all",
"state": null,
"sub": []
},
{
"name": "rx-fcs",
"state": null,
"sub": []
},
{
"name": "tx-nocache-copy",
"state": null,
"sub": []
},
{
"name": "rx-vlan-offload",
"state": null,
"sub": []
},
{
"name": "generic-receive-offload",
"state": null,
"sub": []
},
{
"name": "generic-segmentation-offload",
"state": null,
"sub": []
},
{
"name": "tcp-segmentation-offload",
"state": null,
"sub": [
{
"name": "tx-tcp-segmentation",
"state": null,
"sub": []
}
]
},
{
"name": "scatter-gather",
"state": null,
"sub": [
{
"name": "tx-scatter-gather",
"state": null,
"sub": []
}
]
},
{
"name": "tx-checksumming",
"state": null,
"sub": [
{
"name": "tx-checksum-ip-generic",
"state": null,
"sub": []
}
]
},
{
"name": "rx-checksumming",
"state": null,
"sub": []
}
],
"pxe": false,
"state": "down"
},
{
"bus_info": "0000:00:06.0",
"current_speed": 1000,
"driver": "e1000",
"interface_properties": {
"numa_node": null,
"pci_id": "8086:100e",
"sriov": {
"available": false,
"pci_id": "",
"sriov_totalvfs": 0
}
},
"mac": "<%= node['nic4_mac'] %>",
"max_speed": 1000,
"name": "enp0s6",
"offloading_modes": [
{
"name": "rx-all",
"state": null,
"sub": []
},
{
"name": "rx-fcs",
"state": null,
"sub": []
},
{
"name": "tx-nocache-copy",
"state": null,
"sub": []
},
{
"name": "rx-vlan-offload",
"state": null,
"sub": []
},
{
"name": "generic-receive-offload",
"state": null,
"sub": []
},
{
"name": "generic-segmentation-offload",
"state": null,
"sub": []
},
{
"name": "tcp-segmentation-offload",
"state": null,
"sub": [
{
"name": "tx-tcp-segmentation",
"state": null,
"sub": []
}
]
},
{
"name": "scatter-gather",
"state": null,
"sub": [
{
"name": "tx-scatter-gather",
"state": null,
"sub": []
}
]
},
{
"name": "tx-checksumming",
"state": null,
"sub": [
{
"name": "tx-checksum-ip-generic",
"state": null,
"sub": []
}
]
},
{
"name": "rx-checksumming",
"state": null,
"sub": []
}
],
"pxe": false,
"state": "down"
},
{
"bus_info": "0000:00:07.0",
"current_speed": 1000,
"driver": "e1000",
"interface_properties": {
"numa_node": null,
"pci_id": "8086:100e",
"sriov": {
"available": false,
"pci_id": "",
"sriov_totalvfs": 0
}
},
"mac": "<%= node['nic5_mac'] %>",
"max_speed": 1000,
"name": "enp0s7",
"offloading_modes": [
{
"name": "rx-all",
"state": null,
"sub": []
},
{
"name": "rx-fcs",
"state": null,
"sub": []
},
{
"name": "tx-nocache-copy",
"state": null,
"sub": []
},
{
"name": "rx-vlan-offload",
"state": null,
"sub": []
},
{
"name": "generic-receive-offload",
"state": null,
"sub": []
},
{
"name": "generic-segmentation-offload",
"state": null,
"sub": []
},
{
"name": "tcp-segmentation-offload",
"state": null,
"sub": [
{
"name": "tx-tcp-segmentation",
"state": null,
"sub": []
}
]
},
{
"name": "scatter-gather",
"state": null,
"sub": [
{
"name": "tx-scatter-gather",
"state": null,
"sub": []
}
]
},
{
"name": "tx-checksumming",
"state": null,
"sub": [
{
"name": "tx-checksum-ip-generic",
"state": null,
"sub": []
}
]
},
{
"name": "rx-checksumming",
"state": null,
"sub": []
}
],
"pxe": false,
"state": "down"
}
],
"disks": [
{
"model": "TOSHIBA MK1002TS",
"name": "sda",
"disk": "sda",
"size": 1000204886016
},
{
"model": "TOSHIBA MK1002TS",
"name": "sdb",
"disk": "sdb",
"size": 1000204886016
},
{
"model": "TOSHIBA MK1002TS",
"name": "sdc",
"disk": "sdc",
"size": 1000204886016
},
{
"model": "TOSHIBA MK1002TS",
"name": "sdd",
"disk": "sdd",
"size": 1000204886016
},
{
"model": "Virtual Floppy0",
"name": "sde",
"disk": "sde",
"size": 0
},
{
"model": "Virtual HDisk0",
"name": "sdf",
"disk": "sdf",
"size": 0
}
],
"system": {
"product": "X9DRW",
"family": "To be filled by O.E.M.",
"fqdn": "srv08-srt.srt.mirantis.net",
"version": "0123456789",
"serial": "0123456789",
"manufacturer": "Supermicro"
},
"memory": {
"slots": 1,
"total": 137455730688,
"maximum_capacity": 274894684160,
"devices": [
{
"frequency": 1333,
"type": "DDR3",
"size": 8589934592
},
{
"frequency": 1333,
"type": "DDR3",
"size": 8589934592
},
{
"frequency": 1333,
"type": "DDR3",
"size": 8589934592
},
{
"frequency": 1333,
"type": "DDR3",
"size": 8589934592
},
{
"frequency": 1333,
"type": "DDR3",
"size": 8589934592
},
{
"frequency": 1333,
"type": "DDR3",
"size": 8589934592
},
{
"frequency": 1333,
"type": "DDR3",
"size": 8589934592
},
{
"frequency": 1333,
"type": "DDR3",
"size": 8589934592
},
{
"frequency": 1333,
"type": "DDR3",
"size": 8589934592
},
{
"frequency": 1333,
"type": "DDR3",
"size": 8589934592
},
{
"frequency": 1333,
"type": "DDR3",
"size": 8589934592
},
{
"frequency": 1333,
"type": "DDR3",
"size": 8589934592
},
{
"frequency": 1333,
"type": "DDR3",
"size": 8589934592
},
{
"frequency": 1333,
"type": "DDR3",
"size": 8589934592
},
{
"frequency": 1333,
"type": "DDR3",
"size": 8589934592
},
{
"frequency": 1333,
"type": "DDR3",
"size": 8589934592
},
{
"type": "Flash",
"size": 16777216
}
]
},
"numa_topology": {
"numa_nodes": [
{
"id": 0,
"memory": 85899345920,
"cpus": [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
]
},
{
"id": 1,
"memory": 51539607552,
"cpus": [
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23
]
}
],
"supported_hugepages": [2048, 1048576],
"distances": [
["1.0", "2.1"],
["2.1", "1.0"]
]
}
},
"timestamp": "",
"progress": 0,
"pending_deletion": false,
"os_platform": "ubuntu",
"manufacturer": "Supermicro"
}
}<% if node != nodes.last %>,<% end %>
<% end %>
]

View File

@ -0,0 +1,28 @@
#!/usr/bin/env ruby
# Generate a Nailgun fake-node fixture (JSON) on stdout by rendering
# fixtures/nodes_template.erb.
#
# Usage: generate_nodes_fixtures.rb [FIRST_IP] [NODE_COUNT]
#   FIRST_IP   - admin-network IP of the first fake node (default 10.20.0.100);
#                its last octet also seeds the node pk/id sequence.
#   NODE_COUNT - number of consecutive nodes to generate (default 10).
require 'erb'

# Random MAC address string, e.g. "aa:bb:cc:dd:ee:ff".
def random_mac
  Array.new(6) { format('%02x', rand(256)) }.join(':')
end

# Build the node descriptor hashes consumed by the ERB template.
def build_nodes(first_ip, count)
  octets = first_ip.split('.')
  prefix = octets[0, 3].join('.')
  start  = octets.last.to_i
  Array.new(count) do |i|
    pk = start + i
    {
      'ip'       => "#{prefix}.#{pk}",
      'pk'       => pk,
      'id'       => pk,
      'main_mac' => random_mac,
      'nic2_mac' => random_mac,
      'nic3_mac' => random_mac,
      'nic4_mac' => random_mac,
      'nic5_mac' => random_mac
    }
  end
end

# BUG FIX: the old `ARGV[0].to_s || "10.20.0.100"` never applied the default
# (nil.to_s is "", which is truthy), and `ARGV[1].to_i || 10` never applied 10
# (nil.to_i is 0), so running without arguments silently produced zero nodes.
# Apply the defaults before converting.
ip      = ARGV[0] || '10.20.0.100'
nodenum = (ARGV[1] || 10).to_i

nodes = build_nodes(ip, nodenum)

nodes_template = File.read('fixtures/nodes_template.erb')
# Pass the current binding explicitly so the template reliably sees `nodes`.
puts ERB.new(nodes_template).result(binding)

View File

@ -1,13 +1,34 @@
#!/bin/bash
#
# Use this script to generate and save astute.yaml fixtures.
# Should be executed on Fuel node with at least 7 discovered
# and unused (not assigned to any env) nodes.
#
# Should be executed on Fuel node with 'advanced' feature enabled
# (see FEATURE_GROUPS list in /etc/nailgun/settings.yaml)
mkdir -p ./yamls
CWD=$(cd `dirname $0` && pwd -P)
mkdir ./yamls
rm -f ./yamls/*
# Render generate_nodes_fixtures.rb output into a JSON fixture file under
# $CWD/fixtures/ so it can later be loaded with create_fake_nodes.
function generate_fake_nodes_fixtures {
# $1 - first IP of admin network to start generate nodes from
# $2 - number of nodes to generate
# $3 - name of fixture
$CWD/generate_nodes_fixtures.rb $1 $2 > $CWD/fixtures/${3}.json
}
# Load the generated fixture named $1 (a file in $CWD/fixtures/) into the
# Nailgun database so the fake nodes appear as discovered nodes.
function create_fake_nodes {
manage.py loaddata $CWD/fixtures/${1}.json
}
# If any fake nodes ('fnode-*') are in 'discover' state, print their IDs.
# NOTE(review): despite its name, nothing visible here actually deletes the
# nodes — the pipeline only lists IDs. Confirm whether a deletion step
# (e.g. piping the IDs to 'fuel2 node delete') was lost in this diff.
function clean_fake_nodes {
fuel2 node list | grep 'discover' | grep -q 'fnode-' &&
fuel2 node list | awk '/fnode-.* discover /{ print $2 }'
}
# Print the 'cidr' value of network-group id 1 (the admin network) from
# `fuel network-group list --json`.
# NOTE(review): the caller appends ".100" to this value to form an IP, which
# only works if 'cidr' here effectively yields a three-octet prefix — confirm
# the actual field contents against the fuel CLI output.
function admin_net_tpl {
ruby -rjson -e 'print JSON.parse(ARGV[0]).find{|i| i["id"] == 1}["cidr"]' "`fuel network-group list --json`"
}
function id_of_role {
env=$1
role=$2
@ -20,39 +41,6 @@ function id_of_role {
' $yaml $role
}
function fix_node_names {
file=$1
ruby -ryaml -e '
astute = YAML.load(File.read(ARGV[0]))
astute["network_metadata"]["nodes"].each do |key, hash|
wrong = hash["name"]
puts "\"s/#{wrong}/#{key}/g\""
end
' $file | xargs -I {} sed -e {} -i $file
}
function list_free_nodes {
fuel2 node list 2>/dev/null | grep discover | grep None | awk '{print $2}'
}
function save_yamls {
envid=`fuel env | grep $1 | awk '{print $1}'`
fuel deployment --default --env $envid 2>/dev/null
}
function envid {
fuel env 2>/dev/null | grep $1 | awk '{print $1}'
}
function store_yamls {
for role in $3 ; do
id=`id_of_role $1 $role`
src="deployment_$1/${id}.yaml"
cp $src ./yamls/$2-$role.yaml
fix_node_names ./yamls/$2-$role.yaml
done
}
function enable_ceph {
fuel env --attributes --env $1 --download
ruby -ryaml -e '
@ -66,6 +54,17 @@ function enable_ceph {
attr["editable"]["storage"]["auth_s3_keystone_ceph"]["value"] = true
File.open(ARGV[0], "w").write(attr.to_yaml)' "cluster_$1/attributes.yaml"
fuel env --attributes --env $1 --upload
rm -rf "cluster_$1"
}
function enable_cblock {
fuel env --attributes --env $1 --download
ruby -ryaml -e '
attr = YAML.load(File.read(ARGV[0]))
attr["editable"]["storage"]["volumes_block_device"]["value"] = true
File.open(ARGV[0], "w").write(attr.to_yaml)' "cluster_$1/attributes.yaml"
fuel env --attributes --env $1 --upload
rm -rf "cluster_$1"
}
function enable_murano_sahara_ceilometer {
@ -106,92 +105,88 @@ function enable_neutron_dvr {
fuel env --attributes --env $1 --upload
}
function enable_vcenter {
function enable_nova_quota {
fuel env --attributes --env $1 --download
ruby -ryaml -e '
attr = YAML.load(File.read(ARGV[0]))
attr["editable"]["common"]["use_vcenter"]["value"] = true
attr["editable"]["common"]["nova_quota"]["value"] = true
File.open(ARGV[0], "w").write(attr.to_yaml)' "cluster_$1/attributes.yaml"
fuel env --attributes --env $1 --upload
}
function enable_vcenter_glance {
function enable_public_ssl {
fuel env --attributes --env $1 --download
ruby -ryaml -e '
attr = YAML.load(File.read(ARGV[0]))
attr["editable"]["storage"]["images_vcenter"]["value"] = true
attr["editable"]["public_ssl"]["services"]["value"] = true
attr["editable"]["public_ssl"]["horizon"]["value"] = true
File.open(ARGV[0], "w").write(attr.to_yaml)' "cluster_$1/attributes.yaml"
fuel env --attributes --env $1 --upload
}
function vmware_settings {
compute_vmware=$2
fuel --env $1 vmware-settings --download
ruby -ryaml -e '
vmware = YAML.load(File.read(ARGV[0]))
vcenter_cred = {
"vcenter_host"=>"172.16.0.254", "vcenter_password"=>"Qwer!1234",
"vcenter_username"=>"administrator@vsphere.local"
}
vmware["editable"]["value"]["availability_zones"][0].merge! vcenter_cred
File.open(ARGV[0], "w").write(vmware.to_yaml)' "vmware_settings_$1.yaml"
if [ "$compute_vmware" = "compute-vmware" ]; then
env_id=`envid $1`
node_id=$(list_free_nodes | sed -n '1p')
fuel --env $env_id node set --node $node_id --role compute-vmware
ruby -ryaml -e '
$compute_vmware_node = ARGV[1]
puts $compute_vmware_node
vmware = YAML.load(File.read(ARGV[0]))
vmware_computes = {
"datastore_regex"=>".*", "service_name"=>"vm_cluster1",
"target_node"=>{"current"=>{"id"=>$compute_vmware_node,
"label"=>$compute_vmware_node}, "options"=>[{"id"=>"controllers",
"label"=>"controllers"}, {"id"=>$compute_vmware_node,
"label"=>$compute_vmware_node}]}, "vsphere_cluster"=>"Cluster1"
}
vmware["editable"]["value"]["availability_zones"][0]["nova_computes"][0].merge! vmware_computes
File.open(ARGV[0], "w").write(vmware.to_yaml)' "vmware_settings_$1.yaml" "node-$node_id"
else
ruby -ryaml -e '
vmware = YAML.load(File.read(ARGV[0]))
vmware_computes = {
"datastore_regex"=>".*", "service_name"=>"vm_cluster1",
"target_node"=>{"current"=>{"id"=>"controllers",
"label"=>"controllers"}, "options"=>[{"id"=>"controllers",
"label"=>"controllers"}]}, "vsphere_cluster"=>"Cluster1"
}
vmware_glance = {
"ca_file"=>{"content"=>"RSA", "name"=>"vcenter-ca.pem"},
"datacenter"=>"Datacenter", "datastore"=>"nfs",
"vcenter_host"=>"172.16.0.254", "vcenter_password"=>"Qwer!1234",
"vcenter_username"=>"administrator@vsphere.local"
}
vmware["editable"]["value"]["availability_zones"][0]["nova_computes"][0].merge! vmware_computes
vmware["editable"]["value"]["glance"].merge! vmware_glance
File.open(ARGV[0], "w").write(vmware.to_yaml)' "vmware_settings_$1.yaml"
fi
fuel --env $1 vmware-settings --upload
}
function enable_vms_conf {
virt_node_ids=`fuel nodes --env $1 2>/dev/null | grep virt | awk '{print $1}'`
virt_node_ids=`fuel2 node list --env $1 2>/dev/null | grep virt | awk '{print $2}'`
for id in $virt_node_ids ; do
fuel2 node create-vms-conf $id --conf '{"id":3,"ram":2,"cpu":2}'
done
}
function list_free_nodes {
# list unused nodes from the list of fake nodes
if [ -n "$1" ] ; then
fuel2 node list 2>/dev/null | grep discover | grep None | grep 'fnode-' | grep $1 | awk '{print $2}'
else
fuel2 node list 2>/dev/null | grep discover | grep None | grep 'fnode-' | awk '{print $2}'
fi
}
function save_yamls {
envid=`fuel env | grep $1 | awk '{print $1}'`
fuel deployment --default --env $envid 2>/dev/null
}
function envid {
fuel env 2>/dev/null | grep $1 | awk '{print $1}'
}
function fix_node_names {
file=$1
ruby -ryaml -e '
astute = YAML.load(File.read(ARGV[0]))
astute["network_metadata"]["nodes"].each do |key, hash|
wrong = hash["name"]
puts "\"s/#{wrong}/#{key}/g\""
end
' $file | xargs -I {} sed -e {} -i $file
}
function store_yamls {
for role in $3 ; do
id=`id_of_role $1 $role`
src="deployment_$1/${id}.yaml"
cp $src ./yamls/$2-$role.yaml
fix_node_names ./yamls/$2-$role.yaml
done
}
function generate_yamls {
env=`envid $1`
name=$2
roles=($3)
# Create fake nodes for our fixtures
generate_fake_nodes_fixtures $admin_first_ip 10 default_nodegroup
create_fake_nodes default_nodegroup
if [ "${name/ceph}" != "$name" ] ; then
enable_ceph $env
fi
if [ "${name/murano.sahara.ceil}" != "$name" ] ; then
enable_murano_sahara_ceilometer $env
fi
if [ "${name/nova_quota}" != "$name" ] ; then
enable_nova_quota $env
fi
if [ "${name/ironic}" != "$name" ] ; then
enable_ironic $env
fi
@ -201,18 +196,25 @@ function generate_yamls {
if [ "${name/dvr}" != "$name" ] ; then
enable_neutron_dvr $env
fi
if [ "${name/vmware.glance}" != "$name" ] ; then
enable_vcenter $env
enable_vcenter_glance $env
vmware_settings $env
if [ "${name/public_ssl}" != "$name" ] ; then
enable_public_ssl $env
fi
if [ "${name/vmware.cinder-vmware.compute-vmware}" != "$name" ] ; then
enable_vcenter $env
vmware_settings $env compute-vmware
if [ "${name/cblock}" != "$name" ] ; then
enable_cblock $env
fi
if [ "${name/multirack}" != "$name" ] ; then
# move controllers to custom node group
for id in `list_free_nodes 9.9.9` ; do
if [ "${roles[0]}" = "controller" ] ; then
fuel --env $env node set --node $id --role ${roles[0]}
roles=("${roles[@]:1}")
fi
done
fi
for id in `list_free_nodes` ; do
if ! [ -z "${roles[0]}" ] ; then
if [ -n "${roles[0]}" ] ; then
fuel --env $env node set --node $id --role ${roles[0]}
roles=("${roles[@]:1}")
sleep 1
@ -234,11 +236,36 @@ function clean_env {
fuel env --delete --env $env
rm -rf "cluster_$env"
rm -rf "deployment_$env"
rm -rf "vmware_settings_$env.yaml"
sleep 60
rm -f network_${env}.yaml
sleep 80
fi
clean_fake_nodes
}
function add_nodegroup {
env=`envid $1`
name=$2
fuel --env $env nodegroup --create --name $name
}
function update_default_nodegroup {
env=`envid $1`
fuel network --env $env download
sed -e 's/172\.16\.0\./10.11.1./g' -i network_${env}.yaml
sed -e 's/192\.168\.0\./10.11.2./g' -i network_${env}.yaml
sed -e 's/192\.168\.1\./10.11.3./g' -i network_${env}.yaml
sed -e 's/192\.168\.2\./10.11.4./g' -i network_${env}.yaml
fuel network --env $env upload
}
clean_fake_nodes
sleep 1
# Get some context
admin_net=$(admin_net_tpl)
admin_first_ip="${admin_net}.100"
# Neutron vlan ceph
fuel env --create --name test_neutron_vlan --rel 2 --net vlan
generate_yamls 'test_neutron_vlan' 'neut_vlan.ceph' 'controller controller controller compute ceph-osd ceph-osd' 'primary-controller compute ceph-osd'
@ -246,7 +273,7 @@ clean_env 'test_neutron_vlan'
# Neutron vlan addons
fuel env --create --name test_neutron_vlan --rel 2 --net vlan
generate_yamls 'test_neutron_vlan' 'neut_vlan.murano.sahara.ceil' 'controller controller compute mongo mongo cinder cinder-block-device' 'primary-controller controller compute primary-mongo mongo cinder cinder-block-device'
generate_yamls 'test_neutron_vlan' 'neut_vlan.cblock.murano.sahara.ceil' 'controller controller compute mongo mongo cinder cinder-block-device' 'primary-controller controller compute primary-mongo mongo cinder cinder-block-device'
clean_env 'test_neutron_vlan'
# Neutron-dvr vlan
@ -254,32 +281,32 @@ fuel env --create --name test_neutron_vlan --rel 2 --net vlan
generate_yamls 'test_neutron_vlan' 'neut_vlan.dvr' 'controller controller controller' 'primary-controller'
clean_env 'test_neutron_vlan'
# Neutron vlan VMware vCenter + VMware Glance
fuel env --create --name test_neutron_vlan --rel 2 --net vlan
generate_yamls 'test_neutron_vlan' 'neut_vlan.vmware.glance' 'controller controller controller' 'primary-controller'
clean_env 'test_neutron_vlan'
# Neutron vlan VMware vCenter + cinder-vmware + compute-vmware
fuel env --create --name test_neutron_vlan --rel 2 --net vlan
generate_yamls 'test_neutron_vlan' 'neut_vlan.vmware.cinder-vmware.compute-vmware' 'controller controller controller cinder-vmware' 'primary-controller compute-vmware cinder-vmware'
clean_env 'test_neutron_vlan'
# Neutron tun addons + ceph
fuel env --create --name test_neutron_tun --rel 2 --net tun
generate_yamls 'test_neutron_tun' 'neut_tun.ceph.murano.sahara.ceil' 'controller controller compute ceph-osd ceph-osd mongo mongo' 'primary-controller controller compute ceph-osd primary-mongo mongo'
clean_env 'test_neutron_tun'
# Neutron tun ironic
fuel env --create --name test_neutron_tun --rel 2 --net tun
generate_yamls 'test_neutron_tun' 'neut_tun.ironic' 'controller ironic' 'primary-controller ironic'
clean_env 'test_neutron_tun'
# Neutron vlan ironic
fuel env --create --name test_neutron_vlan --rel 2 --net vlan
generate_yamls 'test_neutron_vlan' 'neut_tun.ironic' 'controller ironic' 'primary-controller ironic'
clean_env 'test_neutron_vlan'
# Neutron-l3ha tun
# Neutron-l3ha tun + nova_quota
fuel env --create --name test_neutron_tun --rel 2 --net tun
generate_yamls 'test_neutron_tun' 'neut_tun.l3ha' 'controller controller controller' 'primary-controller'
generate_yamls 'test_neutron_tun' 'neut_tun.l3ha.nova_quota' 'controller controller controller' 'primary-controller'
clean_env 'test_neutron_tun'
# Neutron tun + vms_conf
fuel env --create --name test_neutron_tun --rel 2 --net tun
generate_yamls 'test_neutron_tun' 'neut_tun.vms_conf' 'virt compute' 'virt'
clean_env 'test_neutron_tun'
# Multirack, Neutron tun, addons, ceph, public and horizon ssl
fuel env --create --name test_neutron_tun --rel 2 --net tun
update_default_nodegroup 'test_neutron_tun'
add_nodegroup 'test_neutron_tun' 'custom_group1'
generate_fake_nodes_fixtures 9.9.9.150 5 custom_nodegroup
create_fake_nodes custom_nodegroup
generate_yamls 'test_neutron_tun' 'neut_tun.multirack.murano.sahara.ceil.ceph.public_ssl' 'controller controller controller mongo mongo compute ceph-osd ceph-osd' 'primary-controller compute ceph-osd primary-mongo'
clean_env 'test_neutron_tun'