Even more cleanup of outdated stuff

Mike Scherbakov 2013-10-01 00:06:44 +04:00
parent 4f1f173964
commit 5106ed9bd3
14 changed files with 10 additions and 2139 deletions

View File

@@ -1,354 +0,0 @@
---
nodes:
- role: compute
network_data:
- name: public
ip: 172.18.94.39
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.39
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '1'
default_gateway: 172.18.94.33
uid: '1'
mac: 64:C3:54:54:D2:66
name: compute-01
ip: 172.18.94.39
profile: centos-x86_64
fqdn: compute-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.39
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: &18648020
ks_spaces: ! '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3\",\"volumes\":
[{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200}, {\"type\":
\"mbr\"}, {\"size\": 20275, \"type\": \"pv\", \"vg\": \"os\"}],\"size\": 19232},{\"type\":
\"vg\", \"id\": \"os\", \"volumes\": [{\"mount\": \"/\", \"type\": \"lv\", \"name\":
\"root\", \"size\": 19232}, {\"mount\": \"swap\", \"type\": \"lv\", \"name\":
\"swap\", \"size\": 1024}]}, {\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4\",
\"volumes\": [{\"type\": \"mbr\"}, {\"size\": 20476, \"type\": \"pv\", \"vg\":
\"cinder-volumes\"}], \"size\": 20476}]"'
mco_enable: 1
mco_vhost: mcollective
mco_pskey: unset
mco_user: mcollective
puppet_enable: 0
install_log_2_syslog: 1
mco_password: marionette
puppet_auto_setup: 1
puppet_master: fuel.domain.tld
mco_auto_setup: 1
auth_key: ! '""'
puppet_version: 2.7.19
mco_connector: rabbitmq
mco_host: 172.18.94.34
interfaces:
eth0:
ip_address: 172.18.94.39
netmask: 255.255.255.0
dns_name: compute-01.domain.tld
static: '1'
mac_address: 64:C3:54:54:D2:66
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: &18667760
memory:
total: 778694656
interfaces:
- mac: 64:D8:E1:F6:66:43
max_speed: 100
name: eth2
ip: 10.22.0.94
netmask: 255.255.255.0
current_speed: 100
- mac: 64:C8:E2:3B:FD:6E
max_speed: 100
name: eth1
ip: 10.21.0.94
netmask: 255.255.255.0
current_speed: 100
- name: eth0
ip: 10.20.0.94
netmask: 255.255.255.0
mac: 64:43:7B:CA:56:DD
max_speed: 100
current_speed: 100
disks:
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0
name: sdc
size: 2411724800000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0
name: sdb
size: 536870912000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0
name: sda
size: 17179869184
system:
serial: '0'
version: '1.2'
fqdn: bootstrap
family: Virtual Machine
manufacturer: VirtualBox
cpu:
real: 0
total: 1
spec:
- model: Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz
frequency: 2397
error_type:
- role: primary-controller
network_data:
- name: public
ip: 172.18.94.41
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.41
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '2'
default_gateway: 172.18.94.33
uid: '2'
mac: 64:48:7A:14:83:E8
name: controller-01
ip: 172.18.94.41
profile: centos-x86_64
fqdn: controller-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.41
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.41
netmask: 255.255.255.0
dns_name: controller-01.domain.tld
static: '1'
mac_address: 64:48:7A:14:83:E8
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: controller
network_data:
- name: public
ip: 172.18.94.42
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.42
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '3'
default_gateway: 172.18.94.33
uid: '3'
mac: 64:B7:37:B1:1D:C9
name: controller-02
ip: 172.18.94.42
profile: centos-x86_64
fqdn: controller-02.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.42
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.42
netmask: 255.255.255.0
dns_name: controller-02.domain.tld
static: '1'
mac_address: 64:B7:37:B1:1D:C9
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: controller
network_data:
- name: public
ip: 172.18.94.36
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.36
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '4'
default_gateway: 172.18.94.33
uid: '4'
mac: 64:F4:64:E7:50:D3
name: controller-03
ip: 172.18.94.36
profile: centos-x86_64
fqdn: controller-03.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.36
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.36
netmask: 255.255.255.0
dns_name: controller-03.domain.tld
static: '1'
mac_address: 64:F4:64:E7:50:D3
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
attributes:
master_ip: 172.18.94.34
use_cow_images: true
libvirt_type: kvm
dns_nameservers: 172.18.94.34
verbose: true
debug: true
auto_assign_floating_ip: true
start_guests_on_host_boot: true
create_networks: true
compute_scheduler_driver: nova.scheduler.multi.MultiScheduler
quantum: true
master_hostname: controller-01
nagios: false
proj_name: test
nagios_master: fuelweb.domain.tld
management_vip: 10.107.2.254
public_vip: 172.18.94.46
novanetwork_parameters:
vlan_start: <1-1024>
network_manager: String
network_size: <Integer>
quantum_parameters:
tenant_network_type: gre
segment_range: 300:500
metadata_proxy_shared_secret: quantum
mysql:
root_password: root
glance:
db_password: glance
user_password: glance
swift:
user_password: swift_pass
nova:
db_password: nova
user_password: nova
access:
password: admin
user: admin
tenant: admin
email: admin@example.org
keystone:
db_password: keystone
admin_token: nova
quantum_access:
user_password: quantum
db_password: quantum
rabbit:
password: nova
user: nova
cinder:
password: cinder
user: cinder
floating_network_range: 172.18.94.48/28
fixed_network_range: 10.107.2.0/24
base_syslog:
syslog_port: '514'
syslog_server: 172.18.94.34
syslog:
syslog_port: '514'
syslog_transport: udp
syslog_server: ''
use_unicast_corosync: false
horizon_use_ssl: false
cinder_nodes:
- controller
ntp_servers:
- pool.ntp.org
deployment_id: 1
deployment_mode: ha
deployment_source: cli
deployment_engine: nailyfact
engine:
url: http://localhost/cobbler_api
username: cobbler
password: cobbler

View File

@@ -1,462 +0,0 @@
#Nodes array. Includes references to corresponding nodes' sections.
nodes:
#Simple node declaration. Defines a YAML anchor referenced in the `nodes` section
node_01: &node_01
# == role
# Specifies role of the node
# [primary-controller|controller|storage|swift-proxy|primary-swift-proxy]
# Default: unspecified
role: primary-controller
# == network_data
# Array of network interfaces hashes
# === name: scalar or array of one or more of [management|fixed|public|storage|admin(**deprecated)|floating(**deprecated)]
# === ip: IP address to be configured by puppet on this interface
# === dev: interface device name
# === netmask: network mask for the interface
# === vlan: vlan ID for the interface
# === gateway: IP address of gateway (**not used**)
network_data:
- name: public
ip: 10.20.0.94
dev: eth0
netmask: 255.255.255.0
gateway: 10.20.0.1
- name:
- management
- storage
ip: 10.20.1.94
netmask: 255.255.255.0
dev: eth1
- name: fixed
dev: eth2
# == public_br
# Name of the public bridge for Quantum-enabled configuration
public_br: br-ex
# == internal_br
# Name of the internal bridge for Quantum-enabled configuration
internal_br: br-mgmt
# == id ** TO BE DOCUMENTED. Suspected: node id in mcollective server.cfg.
id: 1
# == default_gateway
# Default gateway for the node
default_gateway: 10.20.0.1
# == uid ** TO BE DOCUMENTED
uid: 1
# == mac
# MAC address of the interface being used for network boot.
mac: 64:43:7B:CA:56:DD
# == name
# name of the system in cobbler
name: controller-01
# == ip
# IP issued by cobbler DHCP server to this node during network boot.
ip: 10.20.0.94
# == profile
# Cobbler profile for the node.
# Default: centos-x86_64
# [centos-x86_64|rhel-x86_64]
# CAUTION:
# rhel-x86_64 is created only after rpmcache class is run on master node
profile: centos-x86_64
# == fqdn
# Fully-qualified domain name of the node
fqdn: controller-01.domain.tld
# == power_type
# Cobbler power-type. Consult cobbler documentation for available options.
# Default: ssh
power_type: ssh
# == power_user
# Username for cobbler to manage power of this machine
# Default: unset
power_user: root
# == power_pass
# Password/credentials for cobbler to manage power of this machine
# Default: unset
power_pass: /root/.ssh/bootstrap.rsa
# == power_address
# IP address of the device managing the node power state.
# Default: unset
power_address: 10.20.0.94
# == netboot_enabled
# Disable/enable netboot for this node.
netboot_enabled: '1'
# == name_servers
# DNS name servers for this node during provisioning phase.
name_servers: ! '"10.20.0.2"'
# == puppet_master
# Hostname or IP address of puppet master node
puppet_master: fuel.domain.tld
# == ks_meta
# Kickstart metadata used during provisioning
ks_meta:
# == ks_spaces
# Kickstart data for disk partitioning
# The simplest way to calculate it is to use a REST call to the nailgun API,
# recalculate disk sizes into MiB, and dump the following config. The workflow is as follows:
# GET request to http://<fuel-master-node>:8000/api/nodes
# Parse JSON and derive disk data from meta['disks']. Set explicitly which disk is system and which is for cinder.
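# For example (the master IP is illustrative):
#   curl http://10.20.0.2:8000/api/nodes | python -m json.tool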
# $system_disk_size=floor($system_disk_meta['disks']['size']/1048576)
# $system_disk_path=$system_disk_meta['disks']['disk']
# $cinder_disk_size=floor($cinder_disk_meta['disks']['size']/1048576)
# $cinder_disk_path=$cinder_disk_meta['disks']['disk']
#
# All further calculations are made in MiB
# Calculation of system partitions
#
# For each node:
# calculate size of physical volume for operating system:
# $pv_size = $system_disk_size - 200 - 1
# declare $swap_size
# calculate size of root partition:
# $free_vg_size = $pv_size - $swap_size
# $free_extents = floor($free_vg_size/32)
# $root_size = 32 * $free_extents
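# As a worked example (assuming a 20480 MiB system disk and a 2048 MiB swap):
#   $pv_size      = 20480 - 200 - 1  = 20279
#   $free_vg_size = 20279 - 2048     = 18231
#   $free_extents = floor(18231/32)  = 569
#   $root_size    = 32 * 569         = 18208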
# ks_spaces: '"[
# {\"type\": \"disk\",
# \"id\": $system_disk_path,
# \"volumes\":[
# {\"mount\": \"/boot\",
\"type\": \"partition\",
\"size\": 200},
# {\"type\": \"mbr\"},
# {\"vg\": \"os\",
\"type\": \"pv\",
\"size\": $pv_size}
# ],
# \"size\": $system_disk_size
# },
#{\"type\": \"vg\", \"id\": \"os\", \"volumes\":
#[
# {\"mount\": \"/\", \"type\": \"lv\", \"name\": \"root\", \"size\": $system_disk_size },
# {\"mount\": \"swap\", \"type\": \"lv\", \"name\": \"swap\", \"size\": $swap_size}
#]
#},
#{\"type\": \"disk\", \"id\": \"$path_to_cinder_disk\",
#\"volumes\":
#[
# {\"type\": \"mbr\"},
# {\"size\": $cinder_disk_size, \"type\": \"pv\", \"vg\": \"cinder-volumes\"}
#],
#\"size\": $cinder_disk_size
#}
#]"'
ks_spaces: '"[
{\"type\": \"disk\",
\"id\": \"disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3\",
\"volumes\": [
{\"mount\": \"/boot\",
\"type\": \"partition\",
\"size\": 200},
{\"type\": \"mbr\"},
{\"size\": 20000,
\"type\": \"pv\",
\"vg\": \"os\"}
],
\"size\": 20480},
{\"type\": \"vg\",
\"id\": \"os\",
\"volumes\": [
{\"mount\": \"/\",
\"type\": \"lv\",
\"name\": \"root\",
\"size\": 10240},
{\"mount\": \"swap\",
\"type\": \"lv\",
\"name\": \"swap\",
\"size\": 2048}
]}
]"'
# == mco_enable
# Whether mcollective should be installed and enabled on the node
mco_enable: 1
# == mco_vhost
# Mcollective AMQP virtual host
mco_vhost: mcollective
# == mco_pskey
# **NOT USED**
mco_pskey: unset
# == mco_user
# Mcollective AMQP user
mco_user: mcollective
# == puppet_enable
# Whether the puppet agent should start on boot
# Default: 0
puppet_enable: 0
# == install_log_2_syslog
# Enable/disable remote (syslog) logging during installation
# Default: 1
install_log_2_syslog: 1
# == mco_password
# Mcollective AMQP password
mco_password: marionette
# == puppet_auto_setup
# Whether to install puppet during provisioning
# Default: 1
puppet_auto_setup: 1
# == puppet_master
# hostname or IP of puppet master server
puppet_master: fuel.domain.tld
# == mco_auto_setup
# Whether to install mcollective during provisioning
# Default: 1
mco_auto_setup: 1
# == auth_key
# Public RSA key to be added to cobbler authorized keys
auth_key: ! '""'
# == puppet_version
# Which puppet version to install on the node
puppet_version: 2.7.19
# == mco_connector
# Mcollective AMQP driver.
# Default: rabbitmq
mco_connector: rabbitmq
# == mco_host
# AMQP host to which Mcollective agent should connect
mco_host: 10.20.0.2
# == interfaces
# Hash of interfaces configured during provision state
interfaces:
eth0:
ip_address: 10.20.0.94
netmask: 255.255.255.0
dns_name: controller-01.domain.tld
static: '1'
mac_address: 64:43:7B:CA:56:DD
# == interfaces_extra
# extra interfaces information
interfaces_extra:
eth2:
onboot: 'no'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth0:
onboot: 'yes'
peerdns: 'no'
# == meta
# Outdated stuff needed for log parsing during astute jobs.
meta:
memory:
total: 778694656
interfaces:
- mac: 64:D8:E1:F6:66:43
max_speed: 100
name: eth2
ip: 10.22.0.94
netmask: 255.255.255.0
current_speed: 100
- mac: 64:C8:E2:3B:FD:6E
max_speed: 100
name: eth1
ip: 10.21.0.94
netmask: 255.255.255.0
current_speed: 100
- name: eth0
ip: 10.20.0.94
netmask: 255.255.255.0
mac: 64:43:7B:CA:56:DD
max_speed: 100
current_speed: 100
disks:
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0
name: sdc
size: 2411724800000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0
name: sdb
size: 536870912000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0
name: sda
size: 17179869184
system:
serial: '0'
version: '1.2'
fqdn: bootstrap
family: Virtual Machine
manufacturer: VirtualBox
cpu:
real: 0
total: 1
spec:
- model: Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz
frequency: 2397
error_type:
#Openstack cluster attributes used during deployment.
attributes:
# == master_ip
# IP of puppet master.
master_ip: 10.20.0.2
# == use_cow_images
# Whether to use cow images
use_cow_images: true
# == libvirt_type
# Nova libvirt hypervisor type
# Values: qemu|kvm
# Default: kvm
libvirt_type: qemu
# == dns_nameservers
# array of DNS servers configured during deployment phase.
dns_nameservers:
- 10.20.0.1
# These parameters specify the verbosity level of log messages
# in OpenStack components' configs.
# Debug sets the DEBUG level and ignores the verbose setting, if any.
# Verbose sets the INFO level.
# If neither debug nor verbose is set, the default WARNING level is used.
# Note: if syslog is on, this default level may be configured (for syslog) with the syslog_log_level option.
# == verbose
# whether to enable verbosity
# Default: true
verbose: true
# == debug
# whether to enable debug
# Default: false
debug: true
# == auto_assign_floating_ip
# Whether to assign floating IPs automatically
auto_assign_floating_ip: true
# == start_guests_on_host_boot
# Default: true
start_guests_on_host_boot: true
# == create_networks
# whether to create fixed or floating networks
create_networks: true
# == compute_scheduler_driver
# Nova scheduler driver class
compute_scheduler_driver: nova.scheduler.multi.MultiScheduler
# == quantum
# Whether quantum is enabled
# Default: true
quantum: true
# == master_hostname
# Which controller node to treat as the master node. Used only for certainty during deployment.
master_hostname: controller-01
# == nagios
# Whether to enable nagios clients on the nodes
nagios: false
# == proj_name
# name of nagios project
proj_name: test
# == nagios_master
# nagios master server name
nagios_master: fuelweb.domain.tld
# == management_vip
# Virtual IP address for internal services (MySQL, AMQP, internal OpenStack endpoints)
management_vip: 10.20.1.200
# == public_vip
# Virtual IP address for public services (Horizon, public OpenStack endpoints)
public_vip: 10.20.0.200
#Nova-network part, gets ignored if $quantum = `false`
novanetwork_parameters:
vlan_start: <1-1024>
# == network_manager
# Which nova-network manager to use
network_manager: String
# == network_size
# which network size to use during fixed network range segmentation
network_size: <Integer>
#Quantum part, used only if quantum='true'
quantum_parameters:
# == tenant_network_type
# Which type of network segmentation to use.
# Values: gre|vlan
tenant_network_type: gre
# == segment_range
# Range of IDs for network segmentation. Consult Quantum documentation.
segment_range: ! '300:500'
# == metadata_proxy_shared_secret
# Shared secret for metadata proxy services
metadata_proxy_shared_secret: quantum
# Below go credentials and access parameters for main OpenStack components
mysql:
root_password: root
glance:
db_password: glance
user_password: glance
swift:
user_password: swift_pass
nova:
db_password: nova
user_password: nova
access:
password: admin
user: admin
tenant: admin
email: admin@example.org
keystone:
db_password: keystone
admin_token: nova
quantum_access:
user_password: quantum
db_password: quantum
rabbit:
password: nova
user: nova
cinder:
password: cinder
user: cinder
# == floating_network_range
# CIDR (for quantum == true) or array of IPs (for quantum == false)
# Used for creation of floating networks/IPs during deployment
floating_network_range: 10.20.0.150/26
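# With quantum == false, an array of individual IPs is expected instead, e.g.:
# floating_network_range:
#   - 10.20.0.150
#   - 10.20.0.151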
# == fixed_network_range
# CIDR for fixed network created during deployment.
fixed_network_range: 10.20.2.0/24
# == base_syslog
# Main syslog server configuration.
base_syslog:
syslog_port: '514'
syslog_server: 10.20.0.2
# == syslog
# Additional syslog servers configuration.
syslog:
syslog_port: '514'
syslog_transport: udp
syslog_server: ''
# == use_unicast_corosync
# which communication protocol to use for corosync
use_unicast_corosync: false
# == horizon_use_ssl
# Dashboard (horizon) https/ssl mode
# false: normal mode with no encryption
# 'default': uses keys supplied with the ssl module package
# 'exist': assumes that the keys (domain name based certificate) are provisioned in advance
# 'custom': require fileserver static mount point [ssl_certs] and hostname based certificate existence
horizon_use_ssl: false
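# For example, to use a pre-provisioned (domain name based) certificate instead:
# horizon_use_ssl: 'exist'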
# == cinder_nodes
# Which nodes to use as cinder-volume backends
# Array of values 'all'|<hostname>|<internal IP address of node>|'controller'|<node_role>
cinder_nodes:
- controller
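# Other accepted forms, per the values above (the address is illustrative):
# cinder_nodes:
#   - all
# cinder_nodes:
#   - 10.20.1.94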
# == ntp_servers
# List of ntp servers
ntp_servers:
- pool.ntp.org
# == deployment_id
# ID of the deployment, used to differentiate environments
deployment_id: 1
# == deployment_mode
# [ha|ha_full|multinode|single|ha_minimal]
deployment_mode: ha
# == deployment_source
# [web|cli]
deployment_source: cli
# == deployment_engine
# [simplepuppet(**deprecated**)|nailyfact]
# Default: nailyfact
deployment_engine: nailyfact
#Cobbler engine parameters
engine:
url: http://localhost/cobbler_api
username: cobbler
password: cobbler

View File

@@ -1,614 +0,0 @@
---
nodes:
- role: compute
network_data:
- name: public
ip: 172.18.94.39
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.39
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '1'
default_gateway: 172.18.94.33
uid: '1'
mac: 64:C3:54:54:D2:66
name: compute-01
ip: 172.18.94.39
profile: centos-x86_64
fqdn: compute-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.39
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: &18648020
ks_spaces: ! '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3\",\"volumes\":
[{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200}, {\"type\":
\"mbr\"}, {\"size\": 20275, \"type\": \"pv\", \"vg\": \"os\"}],\"size\": 19232},{\"type\":
\"vg\", \"id\": \"os\", \"volumes\": [{\"mount\": \"/\", \"type\": \"lv\", \"name\":
\"root\", \"size\": 19232}, {\"mount\": \"swap\", \"type\": \"lv\", \"name\":
\"swap\", \"size\": 1024}]}, {\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4\",
\"volumes\": [{\"type\": \"mbr\"}, {\"size\": 20476, \"type\": \"pv\", \"vg\":
\"cinder-volumes\"}], \"size\": 20476}]"'
mco_enable: 1
mco_vhost: mcollective
mco_pskey: unset
mco_user: mcollective
puppet_enable: 0
install_log_2_syslog: 1
mco_password: marionette
puppet_auto_setup: 1
puppet_master: fuel.domain.tld
mco_auto_setup: 1
auth_key: ! '""'
puppet_version: 2.7.19
mco_connector: rabbitmq
mco_host: 172.18.94.34
interfaces:
eth0:
ip_address: 172.18.94.39
netmask: 255.255.255.0
dns_name: compute-01.domain.tld
static: '1'
mac_address: 64:C3:54:54:D2:66
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: &18667760
memory:
total: 778694656
interfaces:
- mac: 64:D8:E1:F6:66:43
max_speed: 100
name: eth2
ip: 10.22.0.94
netmask: 255.255.255.0
current_speed: 100
- mac: 64:C8:E2:3B:FD:6E
max_speed: 100
name: eth1
ip: 10.21.0.94
netmask: 255.255.255.0
current_speed: 100
- name: eth0
ip: 10.20.0.94
netmask: 255.255.255.0
mac: 64:43:7B:CA:56:DD
max_speed: 100
current_speed: 100
disks:
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0
name: sdc
size: 2411724800000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0
name: sdb
size: 536870912000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0
name: sda
size: 17179869184
system:
serial: '0'
version: '1.2'
fqdn: bootstrap
family: Virtual Machine
manufacturer: VirtualBox
cpu:
real: 0
total: 1
spec:
- model: Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz
frequency: 2397
error_type:
- role: primary-controller
network_data:
- name: public
ip: 172.18.94.41
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.41
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '2'
default_gateway: 172.18.94.33
uid: '2'
mac: 64:48:7A:14:83:E8
name: controller-01
ip: 172.18.94.41
profile: centos-x86_64
fqdn: controller-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.41
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.41
netmask: 255.255.255.0
dns_name: controller-01.domain.tld
static: '1'
mac_address: 64:48:7A:14:83:E8
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: controller
network_data:
- name: public
ip: 172.18.94.42
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.42
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '3'
default_gateway: 172.18.94.33
uid: '3'
mac: 64:B7:37:B1:1D:C9
name: controller-02
ip: 172.18.94.42
profile: centos-x86_64
fqdn: controller-02.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.42
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.42
netmask: 255.255.255.0
dns_name: controller-02.domain.tld
static: '1'
mac_address: 64:B7:37:B1:1D:C9
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: controller
network_data:
- name: public
ip: 172.18.94.36
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.36
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '4'
default_gateway: 172.18.94.33
uid: '4'
mac: 64:F4:64:E7:50:D3
name: controller-03
ip: 172.18.94.36
profile: centos-x86_64
fqdn: controller-03.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.36
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.36
netmask: 255.255.255.0
dns_name: controller-03.domain.tld
static: '1'
mac_address: 64:F4:64:E7:50:D3
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: storage
network_data:
- name: public
ip: 172.18.94.43
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.43
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '7'
default_gateway: 172.18.94.33
uid: '7'
mac: 64:57:26:83:1D:CA
name: swift-01
ip: 172.18.94.43
profile: centos-x86_64
fqdn: swift-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.43
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.43
netmask: 255.255.255.0
dns_name: swift-01.domain.tld
static: '1'
mac_address: 64:57:26:83:1D:CA
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: storage
network_data:
- name: public
ip: 172.18.94.47
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.47
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '8'
default_gateway: 172.18.94.33
uid: '8'
mac: 64:DC:FD:AD:EB:4E
name: swift-02
ip: 172.18.94.47
profile: centos-x86_64
fqdn: swift-02.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.47
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.47
netmask: 255.255.255.0
dns_name: swift-02.domain.tld
static: '1'
mac_address: 64:DC:FD:AD:EB:4E
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: storage
network_data:
- name: public
ip: 172.18.94.44
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.44
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '9'
default_gateway: 172.18.94.33
uid: '9'
mac: 64:EA:DF:59:79:39
name: swift-03
ip: 172.18.94.44
profile: centos-x86_64
fqdn: swift-03.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.44
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.44
netmask: 255.255.255.0
dns_name: swift-03.domain.tld
static: '1'
mac_address: 64:EA:DF:59:79:39
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: primary-swift-proxy
network_data:
- name: public
ip: 172.18.94.40
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.40
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '6'
default_gateway: 172.18.94.33
uid: '6'
mac: 64:BC:C3:9C:07:26
name: swiftproxy-01
ip: 172.18.94.40
profile: centos-x86_64
fqdn: swiftproxy-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.40
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.40
netmask: 255.255.255.0
dns_name: swiftproxy-01.domain.tld
static: '1'
mac_address: 64:BC:C3:9C:07:26
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: swift-proxy
network_data:
- name: public
ip: 172.18.94.45
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.45
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '5'
default_gateway: 172.18.94.33
uid: '5'
mac: 64:97:93:5F:B2:DC
name: swiftproxy-02
ip: 172.18.94.45
profile: centos-x86_64
fqdn: swiftproxy-02.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.45
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.45
netmask: 255.255.255.0
dns_name: swiftproxy-02.domain.tld
static: '1'
mac_address: 64:97:93:5F:B2:DC
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
attributes:
master_ip: 172.18.94.34
use_cow_images: true
libvirt_type: kvm
dns_nameservers: 172.18.94.34
verbose: true
debug: true
auto_assign_floating_ip: true
start_guests_on_host_boot: true
create_networks: true
compute_scheduler_driver: nova.scheduler.multi.MultiScheduler
quantum: true
master_hostname: controller-01
nagios: false
proj_name: test
nagios_master: fuelweb.domain.tld
management_vip: 10.107.2.254
public_vip: 172.18.94.46
novanetwork_parameters:
vlan_start: <1-1024>
network_manager: String
network_size: <Integer>
quantum_parameters:
tenant_network_type: gre
segment_range: 300:500
metadata_proxy_shared_secret: quantum
mysql:
root_password: root
glance:
db_password: glance
user_password: glance
swift:
user_password: swift_pass
nova:
db_password: nova
user_password: nova
access:
password: admin
user: admin
tenant: admin
email: admin@example.org
keystone:
db_password: keystone
admin_token: nova
quantum_access:
user_password: quantum
db_password: quantum
rabbit:
password: nova
user: nova
cinder:
password: cinder
user: cinder
floating_network_range: 172.18.94.48/28
fixed_network_range: 10.107.2.0/24
base_syslog:
syslog_port: '514'
syslog_server: 172.18.94.34
syslog:
syslog_port: '514'
syslog_transport: udp
syslog_server: ''
use_unicast_corosync: false
horizon_use_ssl: false
cinder_nodes:
- controller
ntp_servers:
- pool.ntp.org
deployment_id: 1
deployment_mode: ha_full
deployment_source: cli
deployment_engine: nailyfact
engine:
url: http://localhost/cobbler_api
username: cobbler
password: cobbler

View File

@@ -1,249 +0,0 @@
---
nodes:
- role: compute
network_data:
- name: public
ip: 10.20.0.122
dev: eth0
netmask: 255.255.255.0
gateway: 10.20.0.1
- name:
- management
- storage
ip: 10.20.1.122
dev: eth0
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '1'
default_gateway: 10.20.0.1
uid: '1'
mac: 64:7D:B8:84:64:79
name: compute-01
ip: 10.20.0.122
profile: centos-x86_64
fqdn: compute-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 10.20.0.122
netboot_enabled: '1'
name_servers: 10.20.0.2
puppet_master: fuel.domain.tld
ks_meta: &17570000
ks_spaces: ! '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3\",\"volumes\":
[{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200}, {\"type\":
\"mbr\"}, {\"size\": 20275, \"type\": \"pv\", \"vg\": \"os\"}],\"size\": 19232},{\"type\":
\"vg\", \"id\": \"os\", \"volumes\": [{\"mount\": \"/\", \"type\": \"lv\", \"name\":
\"root\", \"size\": 19232}, {\"mount\": \"swap\", \"type\": \"lv\", \"name\":
\"swap\", \"size\": 1024}]}, {\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4\",
\"volumes\": [{\"type\": \"mbr\"}, {\"size\": 20476, \"type\": \"pv\", \"vg\":
\"cinder-volumes\"}], \"size\": 20476}]"'
mco_enable: 1
mco_vhost: mcollective
mco_pskey: unset
mco_user: mcollective
puppet_enable: 0
install_log_2_syslog: 1
mco_password: marionette
puppet_auto_setup: 1
puppet_master: fuel.domain.tld
mco_auto_setup: 1
auth_key: ! '""'
puppet_version: 2.7.19
mco_connector: rabbitmq
mco_host: 10.20.0.2
interfaces:
eth0:
ip_address: 10.20.0.122
netmask: 255.255.255.0
dns_name: compute-01.domain.tld
static: '1'
mac_address: 64:7D:B8:84:64:79
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: &17588060
memory:
total: 778694656
interfaces:
- mac: 64:D8:E1:F6:66:43
max_speed: 100
name: eth2
ip: 10.22.0.94
netmask: 255.255.255.0
current_speed: 100
- mac: 64:C8:E2:3B:FD:6E
max_speed: 100
name: eth1
ip: 10.21.0.94
netmask: 255.255.255.0
current_speed: 100
- name: eth0
ip: 10.20.0.94
netmask: 255.255.255.0
mac: 64:43:7B:CA:56:DD
max_speed: 100
current_speed: 100
disks:
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0
name: sdc
size: 2411724800000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0
name: sdb
size: 536870912000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0
name: sda
size: 17179869184
system:
serial: '0'
version: '1.2'
fqdn: bootstrap
family: Virtual Machine
manufacturer: VirtualBox
cpu:
real: 0
total: 1
spec:
- model: Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz
frequency: 2397
error_type:
- role: primary-controller
network_data:
- name: public
ip: 10.20.0.94
dev: eth0
netmask: 255.255.255.0
gateway: 10.20.0.1
- name:
- management
- storage
ip: 10.20.1.94
dev: eth0
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '2'
default_gateway: 10.20.0.1
uid: '2'
mac: 64:43:7B:CA:56:DD
name: controller-01
ip: 10.20.0.94
profile: centos-x86_64
fqdn: controller-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 10.20.0.94
netboot_enabled: '1'
name_servers: 10.20.0.2
puppet_master: fuel.domain.tld
ks_meta: *17570000
interfaces:
eth0:
ip_address: 10.20.0.94
netmask: 255.255.255.0
dns_name: controller-01.domain.tld
static: '1'
mac_address: 64:43:7B:CA:56:DD
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *17588060
error_type:
attributes:
use_cow_images: true
libvirt_type: kvm
dns_nameservers: 10.20.0.2
verbose: true
debug: true
auto_assign_floating_ip: true
start_guests_on_host_boot: true
create_networks: true
compute_scheduler_driver: nova.scheduler.multi.MultiScheduler
quantum: true
master_hostname: controller-01
nagios: false
proj_name: test
nagios_master: fuelweb.domain.tld
management_vip: 10.20.1.200
public_vip: 10.20.0.200
novanetwork_parameters:
vlan_start: <1-1024>
network_manager: String
network_size: <Integer>
quantum_parameters:
tenant_network_type: gre
segment_range: 300:500
metadata_proxy_shared_secret: quantum
mysql:
root_password: root
glance:
db_password: glance
user_password: glance
swift:
user_password: swift_pass
nova:
db_password: nova
user_password: nova
access:
password: admin
user: admin
tenant: admin
email: admin@example.org
keystone:
db_password: keystone
admin_token: nova
quantum_access:
user_password: quantum
db_password: quantum
rabbit:
password: nova
user: nova
cinder:
password: cinder
user: cinder
floating_network_range: 10.20.0.150/28
fixed_network_range: 10.20.1.0/24
base_syslog:
syslog_port: '514'
syslog_server: 10.20.0.2
syslog:
syslog_port: '514'
syslog_transport: udp
syslog_server: ''
use_unicast_corosync: false
horizon_use_ssl: false
cinder_nodes:
- controller
ntp_servers:
- pool.ntp.org
deployment_id: 1
deployment_mode: ha
deployment_source: cli
deployment_engine: nailyfact
engine:
url: http://localhost/cobbler_api
username: cobbler
password: cobbler

View File

@@ -1,249 +0,0 @@
---
nodes:
- role: primary-controller
network_data:
- name: public
ip: 10.20.0.94
dev: eth0
netmask: 255.255.255.0
gateway: 10.20.0.1
- name:
- management
- storage
ip: 10.20.1.94
dev: eth0
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '2'
default_gateway: 10.20.0.1
uid: '2'
mac: 64:43:7B:CA:56:DD
name: controller-01
ip: 10.20.0.94
profile: centos-x86_64
fqdn: controller-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 10.20.0.94
netboot_enabled: '1'
name_servers: 10.20.0.2
puppet_master: fuel.domain.tld
ks_meta: *17570000
interfaces:
eth0:
ip_address: 10.20.0.94
netmask: 255.255.255.0
dns_name: controller-01.domain.tld
static: '1'
mac_address: 64:43:7B:CA:56:DD
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *17588060
error_type:
- role: compute
network_data:
- name: public
ip: 10.20.0.122
dev: eth0
netmask: 255.255.255.0
gateway: 10.20.0.1
- name:
- management
- storage
ip: 10.20.1.122
dev: eth0
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '1'
default_gateway: 10.20.0.1
uid: '1'
mac: 64:7D:B8:84:64:79
name: compute-01
ip: 10.20.0.122
profile: centos-x86_64
fqdn: compute-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 10.20.0.122
netboot_enabled: '1'
name_servers: 10.20.0.2
puppet_master: fuel.domain.tld
ks_meta: &17570000
ks_spaces: ! '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3\",\"volumes\":
[{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200}, {\"type\":
\"mbr\"}, {\"size\": 20275, \"type\": \"pv\", \"vg\": \"os\"}],\"size\": 19232},{\"type\":
\"vg\", \"id\": \"os\", \"volumes\": [{\"mount\": \"/\", \"type\": \"lv\", \"name\":
\"root\", \"size\": 19232}, {\"mount\": \"swap\", \"type\": \"lv\", \"name\":
\"swap\", \"size\": 1024}]}, {\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4\",
\"volumes\": [{\"type\": \"mbr\"}, {\"size\": 20476, \"type\": \"pv\", \"vg\":
\"cinder-volumes\"}], \"size\": 20476}]"'
mco_enable: 1
mco_vhost: mcollective
mco_pskey: unset
mco_user: mcollective
puppet_enable: 0
install_log_2_syslog: 1
mco_password: marionette
puppet_auto_setup: 1
puppet_master: fuel.domain.tld
mco_auto_setup: 1
auth_key: ! '""'
puppet_version: 2.7.19
mco_connector: rabbitmq
mco_host: 10.20.0.2
interfaces:
eth0:
ip_address: 10.20.0.122
netmask: 255.255.255.0
dns_name: compute-01.domain.tld
static: '1'
mac_address: 64:7D:B8:84:64:79
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: &17588060
memory:
total: 778694656
interfaces:
- mac: 64:D8:E1:F6:66:43
max_speed: 100
name: eth2
ip: 10.22.0.94
netmask: 255.255.255.0
current_speed: 100
- mac: 64:C8:E2:3B:FD:6E
max_speed: 100
name: eth1
ip: 10.21.0.94
netmask: 255.255.255.0
current_speed: 100
- name: eth0
ip: 10.20.0.94
netmask: 255.255.255.0
mac: 64:43:7B:CA:56:DD
max_speed: 100
current_speed: 100
disks:
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0
name: sdc
size: 2411724800000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0
name: sdb
size: 536870912000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0
name: sda
size: 17179869184
system:
serial: '0'
version: '1.2'
fqdn: bootstrap
family: Virtual Machine
manufacturer: VirtualBox
cpu:
real: 0
total: 1
spec:
- model: Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz
frequency: 2397
error_type:
attributes:
use_cow_images: true
libvirt_type: kvm
dns_nameservers: 10.20.0.2
verbose: true
debug: true
auto_assign_floating_ip: true
start_guests_on_host_boot: true
create_networks: true
compute_scheduler_driver: nova.scheduler.multi.MultiScheduler
quantum: true
master_hostname: controller-01
nagios: false
proj_name: test
nagios_master: fuelweb.domain.tld
management_vip: 10.20.1.200
public_vip: 10.20.0.200
novanetwork_parameters:
vlan_start: <1-1024>
network_manager: String
network_size: <Integer>
quantum_parameters:
tenant_network_type: gre
segment_range: 300:500
metadata_proxy_shared_secret: quantum
mysql:
root_password: root
glance:
db_password: glance
user_password: glance
swift:
user_password: swift_pass
nova:
db_password: nova
user_password: nova
access:
password: admin
user: admin
tenant: admin
email: admin@example.org
keystone:
db_password: keystone
admin_token: nova
quantum_access:
user_password: quantum
db_password: quantum
rabbit:
password: nova
user: nova
cinder:
password: cinder
user: cinder
floating_network_range: 10.20.0.150/28
fixed_network_range: 10.20.1.0/24
base_syslog:
syslog_port: '514'
syslog_server: 10.20.0.2
syslog:
syslog_port: '514'
syslog_transport: udp
syslog_server: ''
use_unicast_corosync: false
horizon_use_ssl: false
cinder_nodes:
- controller
ntp_servers:
- pool.ntp.org
deployment_id: 1
deployment_mode: ha
deployment_source: cli
deployment_engine: nailyfact
engine:
url: http://localhost/cobbler_api
username: cobbler
password: cobbler

View File

@@ -5,6 +5,7 @@
Other Questions
===============
.. TODO(mihgen): Provide a clearer answer that reflects reality
1. **[Q]** Why did you decide to provide OpenStack packages through your own
repository?

View File

@@ -11,6 +11,7 @@ Sizing Hardware for Production Deployment
.. contents::
   :local:
.. TODO(mihgen): Add link to Hardware calculator on Mirantis site
One of the first questions people ask when planning an OpenStack deployment is
"what kind of hardware do I need?" There is no such thing as a one-size-fits-all
answer, but there are straightforward rules to selecting appropriate hardware
@@ -281,4 +282,4 @@ from Dell for compute nodes include:
You may also want to consider systems from HP (http://www.hp.com/servers) or
from a smaller systems builder like Aberdeen, a manufacturer that specializes
in powerful, low-cost systems and storage servers (http://www.aberdeeninc.com).

View File

@@ -1,109 +0,0 @@
.. raw:: pdf
PageBreak
.. index:: Redeploying An Environment
.. _Redeploying_An_Environment:
Redeploying An Environment
==========================
.. contents::
   :local:
Because Puppet is additive only, there is no ability to revert changes as you
would in a typical application deployment. If a change needs to be backed out,
you must explicitly add a configuration to reverse it, check the configuration
in, and promote it to production using the pipeline. This means that if a
breaking change does get deployed into production, typically a manual fix is
applied, with the proper fix subsequently checked into version control.
Fuel offers the ability to isolate code changes while developing a deployment
and minimizes the headaches associated with maintaining multiple configurations
through a single Puppet Master by creating what are called environments.
Environments
------------
Puppet supports assigning nodes 'environments'. These environments can be
mapped directly to your development, QA and production life cycles, so it's a
way to distribute code to nodes that are assigned to those environments.
**On the Master node:**
The Puppet Master tries to find modules using its ``modulepath`` setting,
which by default is ``/etc/puppet/modules``. It is common practice to set
this value once in your ``/etc/puppet/puppet.conf``. Environments expand on
this idea and give you the ability to use different settings for different
configurations.
For example, you can specify several search paths. The following example
dynamically sets the ``modulepath`` so Puppet will check a per-environment
folder for a module before serving it from the main set:
.. code-block:: ini
[master]
modulepath = $confdir/$environment/modules:$confdir/modules
[production]
manifest = $confdir/manifests/site.pp
[development]
manifest = $confdir/$environment/manifests/site.pp
**On the Slave Node:**
Once the slave node makes a request, the Puppet Master gets informed of its
environment. If you don't specify an environment, the agent uses the default
``production`` environment.
To set a slave-side environment, just specify the environment setting in the
``[agent]`` block of ``puppet.conf``:
.. code-block:: ini
[agent]
environment = development
Deployment pipeline
-------------------
1. Deploy
In order to deploy multiple environments that don't interfere with each other,
you should specify the ``deployment_id`` option in the YAML file.
It should be an even integer value in the range of 2-254.
This value is used in dynamic environment-based tag generation. Fuel applies
that tag globally to all resources and some services on each node.
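For example, a minimal fragment of the deployment YAML (the value is illustrative):

.. code-block:: yaml

    # must be an even integer in the range of 2-254
    deployment_id: 2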
2. Clean/Revert
At this stage you just need to make sure the environment is in its
original/virgin state.
3. Puppet node deactivate
This will ensure that any resources exported by that node will stop appearing
in the catalogs served to the slave nodes::
puppet node deactivate <node>
where ``<node>`` is the fully qualified domain name as seen in
``puppet cert list --all``.
You can deactivate nodes manually one by one, or execute the following
command to automatically deactivate all nodes::
puppet cert list --all | awk '! /DNS:puppet/ { gsub(/"/, "", $2); print $2}' | xargs puppet node deactivate
4. Redeploy
Start the puppet agent again to apply a desired node configuration.
.. seealso::
http://puppetlabs.com/blog/a-deployment-pipeline-for-infrastructure/
http://docs.puppetlabs.com/guides/environment.html

View File

@@ -6,78 +6,4 @@
.. _Large_Scale_Deployments:
Large Scale Deployments
=======================
When deploying large clusters (of 100 nodes or more) there are two basic
bottlenecks:

* certificate signing requests and Puppet Master/Cobbler capacity, and
* downloading of operating systems and other software.
Careful planning is key to eliminating these potential problem areas, but
there's another way.
Fuel takes care of these problems through caching and orchestration. We feel,
however, that it's always good to have a sense of how to solve these problems
should they appear.
Certificate signing requests and Puppet Master/Cobbler capacity
---------------------------------------------------------------
When deploying a large cluster, you may find that Puppet Master begins to have
difficulty when you start exceeding 20 or more simultaneous requests. Part of
this problem is because the initial process of requesting and signing
certificates involves \*.tmp files that can create conflicts. To solve this
problem, you have two options:
* reduce the number of simultaneous requests,
* or increase the number of Puppet Master/Cobbler servers.
The number of simultaneous certificate requests that are active can be
controlled by staggering the Puppet agent run schedule. This can be
accomplished through orchestration. You don't need extreme staggering (1 to 5
seconds will do) but if this method isn't practical, you can increase the number
of Puppet Master/Cobbler servers.
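A lightweight way to get such staggering is Puppet's own ``splay`` option,
sketched here as one possibility:

.. code-block:: ini

    [agent]
    # delay each agent run by a random offset, spreading out simultaneous requests
    splay = true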
If you're simply overwhelming the Puppet Master process and not running into
file conflicts, one way to get around this problem is to use Puppet Master with
Thin as the backend component and nginx as a frontend component. This
configuration dynamically scales the number of Puppet Master processes to better
accommodate changing load.
.. You can find sample configuration files for nginx and puppetmasterd at [CONTENT NEEDED HERE].
You can also increase the number of servers by creating a cluster that utilizes
a round robin DNS configuration through a service like HAProxy. You will need
to ensure that these nodes are kept in sync. For Cobbler, that means a
combination of the ``--replicate`` switch, XMLRPC for metadata, rsync for
profiles and distributions. Similarly, Puppet Master can be kept in sync with a
combination of rsync (for modules, manifests, and SSL data) and database
replication.
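As a rough sketch (hostnames are hypothetical), keeping secondary servers in
sync could look like::

    cobbler replicate --master=cobbler-01.domain.tld
    rsync -az /etc/puppet/modules/ puppet-02.domain.tld:/etc/puppet/modules/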
..
image:: /_images/cobbler-puppet-ha.jpg
:align: center
Downloading of operating systems and other software
---------------------------------------------------
Large deployments can also suffer from a bottleneck in terms of the additional
traffic created by downloading software from external sources. One way to avoid
this problem is by increasing LAN bandwidth through bonding multiple gigabit
interfaces. You might also want to consider 10G Ethernet trunking between
infrastructure switches using CAT-6a or fiber cables to improve backend speeds,
reduce latency, and provide more overall pipe.
.. seealso:: :ref:`Sizing_Hardware` for more information on choosing networking equipment.
..
Another option is to prevent the need to download so much data in the first place,
either by using apt-cacher to cache frequently downloaded packages or by setting up a
private repository. The downside of using your own repository, however, is that
you have to spend more time manually updating it. Apt-cacher automates this
process. To use apt-cacher, the kickstart that Cobbler sends to each node
should specify Cobbler's IP address and the apt-cacher port as the proxy server.
This will prevent all of the nodes from having to download the software
individually.
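As an illustration, pointing a node's package manager at the master's apt-cacher
(assuming apt-cacher's default port 3142 and the master node at 10.20.0.2)
amounts to a one-line proxy setting::

    # /etc/apt/apt.conf.d/01proxy
    Acquire::http::Proxy "http://10.20.0.2:3142";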
`Contact Mirantis <http://www.mirantis.com/contact/>`_ for information on
creating a private repository.
.. TODO(mihgen): Fill in this section. It needs to be completely rewritten.

View File

@@ -20,15 +20,15 @@ As you know, OpenStack provides the following basic services:
**Compute:**
Compute servers are the workhorses of your installation; they're
the servers on which your users' virtual machines are created.
`nova-scheduler` controls the life-cycle of these VMs.
`nova-compute` controls the life-cycle of these VMs.
**Networking:**
Because an OpenStack cluster (virtually) always includes
multiple servers, the ability for them to communicate with each other and with
the outside world is crucial. Networking was originally handled by the
`nova-network` service, but it has given way to the newer Neutron (formerly
Quantum) networking service. Authentication and authorization for these
transactions are handled by `keystone`.
Quantum) networking service. `nova-network` still has some advantages over Neutron,
and it is supported by Fuel in both Flat-DHCP and VLAN modes.
**Storage:**
OpenStack provides for two different types of storage: block

View File

@@ -6,8 +6,8 @@
.. _Simple:
Simple (non-HA) Deployment
==========================
Simple (no High Availability) Deployment
========================================
In a production environment, you will never have a Simple non-HA
deployment of OpenStack, partly because it forces you to make a number

View File

@@ -1,19 +0,0 @@
.. index:: Cinder vs. nova-volume
Cinder vs. nova-volume
----------------------
Cinder is a persistent storage management service, also known as
block-storage-as-a-service. It was created to replace nova-volume, and
provides persistent storage for VMs.
If you want to use Cinder for persistent storage, you will need to both
enable Cinder and create the block devices on which it will store data.
You will then provide information about those block devices during the Fuel
install.
Cinder block devices can be:
* created by Cobbler during the initial node installation, or
* attached manually (e.g. as additional virtual disks if you are using
VirtualBox, or as additional physical RAID or SAN volumes)
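For the Cobbler-created case, the corresponding ``ks_spaces`` entry dedicates a
whole disk to the ``cinder-volumes`` volume group; unescaped, a sketch based on
the deployment examples in this repository looks like:

.. code-block:: yaml

    - type: disk
      id: disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4
      volumes:
      - type: mbr
      - size: 20476
        type: pv
        vg: cinder-volumes
      size: 20476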

View File

@@ -5,6 +5,7 @@
Object Storage Deployment
-------------------------
.. TODO(mihgen): we need to rewrite this and add info about Ceph
Fuel currently supports several scenarios for deploying object storage:
**Glance + filesystem**

View File

@@ -19,5 +19,3 @@ hardware and how to handle large-scale deployments.
:depth: 2
.. include:: /pages/production-considerations/0015-sizing-hardware.rst
.. include:: /pages/production-considerations/0020-deployment-pipeline.rst
.. include:: /pages/production-considerations/0030-large-deployments.rst