Merge branch 'master' into librarian

This commit is contained in:
Łukasz Oleś 2015-10-21 14:41:58 +02:00
commit 96dff2cb0c
29 changed files with 677 additions and 4 deletions

View File

@ -296,3 +296,14 @@ Full documentation of individual functions is found in the `solar/template.py` f
# Customizing vagrant-settings.yaml
Solar is shipped with sane defaults in `vagrant-setting.yaml_defaults`. If you need to adjust them for your needs, e.g. changing resource allocation for VirtualBox machines, you should just copy the file to `vagrant-setting.yaml` and make your modifications.
# Image based provisioning with Solar
* In `vagrant-setting.yaml_defaults` or `vagrant-settings.yaml` file uncomment `preprovisioned: false` line.
* Run `vagrant up`; it will take some time because it builds an image for bootstrap.
* Currently in order to perform provisioning, pre-built images from Fuel can be used
* Download images [using this link](https://drive.google.com/file/d/0B7I3b5vI7ZYXM0FPTDJEdjg0Qnc/view).
* Login into vm `vagrant ssh solar-dev`
* Go to `cd /vagrant/tmp/` directory
* Untar the images `tar vxf targetimages.tar`
* Now you can run provisioning `/vagrant/examples/provisioning/provision.sh`

1
Vagrantfile vendored
View File

@ -152,6 +152,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
"--memory", SLAVES_RAM,
"--cpus", SLAVES_CPUS,
"--ioapic", "on",
"--macaddress1", "auto",
]
if PARAVIRT_PROVIDER
v.customize ['modifyvm', :id, "--paravirtprovider", PARAVIRT_PROVIDER] # for linux guest

View File

@ -6,7 +6,7 @@ bind-interfaces
dhcp-range={{dhcp_range_start}},{{dhcp_range_end}},12h
# Net boot file name
dhcp-boot=tag:!nopxe,pxelinux.0
dhcp-boot=net:!nopxe,pxelinux.0
# Configure tftp
enable-tftp

View File

@ -0,0 +1,8 @@
# Serve the shared /vagrant directory over HTTP on port 8001 with directory
# listings enabled, so nodes can download provisioning artifacts from it.
server {
listen 8001;
root /vagrant;
location / {
# expose the directory contents as a browsable index
autoindex on;
}
}

View File

@ -47,4 +47,10 @@
# Configure http server to load root
- apt: name=nginx state=present
- template: src=files/nginx.cfg dest=/etc/nginx/conf.d/pxe_image.conf
# Configure http server in order to serve files in the '/vagrant' directory
- template: src=files/nginx_vagrant_dir.cfg dest=/etc/nginx/conf.d/vagrant_dir.conf
- service: name=nginx state=restarted
# Install discovery service
- shell: pip install git+https://github.com/rustyrobot/discovery.git
- shell: 'discovery &'

View File

@ -35,8 +35,9 @@ NETWORK_METADATA = yaml.load("""
def deploy():
db.clear()
resources = vr.create('nodes', 'templates/nodes.yaml', {'count': 1})
first_node = next(x for x in resources if x.name.startswith('node'))
resources = vr.create('nodes', 'templates/nodes.yaml', {'count': 2})
first_node, second_node = [x for x in resources if x.name.startswith('node')]
first_transp = next(x for x in resources if x.name.startswith('transport'))
library = vr.create('library1', 'resources/fuel_library', {})[0]
first_node.connect(library)
@ -44,6 +45,13 @@ def deploy():
keys = vr.create('ceph_key', 'resources/ceph_keys', {})[0]
first_node.connect(keys)
remote_file = vr.create('ceph_key2', 'resources/remote_file',
{'dest': '/var/lib/astute/'})[0]
second_node.connect(remote_file)
keys.connect(remote_file, {'ip': 'remote_ip', 'path': 'remote_path'})
first_transp.connect(remote_file, {'transports': 'remote'})
ceph_mon = vr.create('ceph_mon1', 'resources/ceph_mon',
{'storage': STORAGE,
'keystone': KEYSTONE,

View File

@ -0,0 +1,36 @@
#!/usr/bin/env python
"""Create Solar resources for PXE provisioning of discovered nodes.

Fetches the node list from the discovery service, creates node and dnsmasq
resources from templates, and wires reaction events so that a successful
``run`` triggers ``provision``, which excludes the node's MAC from PXE boot,
which in turn reboots the node into the freshly provisioned system.
"""
import requests
from solar.core import resource
from solar.core import signals
from solar.core import validation
from solar.core.resource import virtual_resource as vr
from solar.events.controls import React
from solar.events.api import add_event

discovery_service = 'http://0.0.0.0:8881'
nodes_list = requests.get(discovery_service).json()

# Create slave node resources
node_resources = vr.create('nodes', 'templates/not_provisioned_nodes.yaml',
                           {'nodes': nodes_list})

# Get master node.  next() works on both py2 and py3, unlike filter(...)[0].
master_node = next(n for n in node_resources if n.name == 'node_master')

# Dnsmasq resources
for node in nodes_list:
    # `node` is the discovery dict; keep the matching resource in a separate
    # variable instead of rebinding `node` (the original shadowed the dict).
    mac_suffix = node['mac'].replace(':', '_')
    dnsmasq = vr.create('dnsmasq_{0}'.format(mac_suffix),
                        'resources/dnsmasq', {})[0]
    node_resource = next(n for n in node_resources
                         if n.name.endswith('node_{0}'.format(mac_suffix)))
    master_node.connect(dnsmasq)
    node_resource.connect(dnsmasq, {'admin_mac': 'exclude_mac_pxe'})

    # Chain: run -> provision -> exclude MAC from PXE -> reboot.
    event = React(node_resource.name, 'run', 'success',
                  node_resource.name, 'provision')
    add_event(event)
    event = React(node_resource.name, 'provision', 'success',
                  dnsmasq.name, 'exclude_mac_pxe')
    add_event(event)
    event = React(dnsmasq.name, 'exclude_mac_pxe', 'success',
                  node_resource.name, 'reboot')
    add_event(event)

View File

@ -0,0 +1,17 @@
#!/bin/bash
# Drive a full provisioning cycle: reset dnsmasq PXE-exclusion state, rebuild
# the Solar resource graph via provision.py, then stage and run the changes.
set -eux
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Remove generated pxe exclude files
sudo rm -f /etc/dnsmasq.d/no_pxe_*.conf
sudo service dnsmasq restart
# Drop previously created resources and re-create them from discovery data.
solar resource clear_all
python "${DIR}"/provision.py
# Stage pending changes, build the deployment plan, and execute it once.
solar changes stage
solar changes process
solar orch run-once last
# Follow the orchestration report until deployment finishes.
watch --color -n1 'solar orch report last'

View File

@ -11,4 +11,7 @@ input:
key_name:
schema: str!
value: ceph
path:
schema: str!
value: /var/lib/astute/ceph/
tags: []

View File

@ -0,0 +1,17 @@
# Write a dnsmasq drop-in tagging this node's MAC as "nopxe" so dnsmasq stops
# offering it the PXE boot file, then restart dnsmasq to apply the change.
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - lineinfile: create=yes dest=/etc/dnsmasq.d/no_pxe_{{exclude_mac_pxe | replace(':', '_')}}.conf line="dhcp-host={{exclude_mac_pxe}},set:nopxe"
    # FIXME: currently there is no way to specify
    # policy not to run several tasks in parallel,
    # so when we deploy several nodes in parallel
    # it causes the problems when two tasks try
    # to restart supervisor at the same time, and
    # fail to do it.
    # Poll until dnsmasq reports "running" before attempting the restart.
    - command: service dnsmasq status
      register: log
      until: log.stdout.find('running') > -1
      retries: 5
      delay: 2
    - shell: service dnsmasq restart

View File

@ -0,0 +1,2 @@
# Playbook for the dnsmasq resource's `run` action: it defines no tasks.
# NOTE(review): presumably a placeholder so the action can be scheduled -- confirm.
- hosts: [{{host}}]
  sudo: yes

View File

@ -0,0 +1,18 @@
# Solar resource definition: dnsmasq helper handled via ansible.
id: dnsmasq
handler: ansible
version: 1.0.0
actions:
  # Adds a per-MAC "nopxe" drop-in and restarts dnsmasq.
  exclude_mac_pxe: exclude_mac_pxe.yaml
  # No-op playbook (see run.yaml).
  run: run.yaml
input:
  # Address of the host the playbooks run against.
  ip:
    schema: str!
    value:
  # MAC address to exclude from PXE boot.
  exclude_mac_pxe:
    schema: str!
    value:
tags: [resources=dnsmasq]

View File

@ -0,0 +1,9 @@
#!/bin/bash
# `provision` action: render the agent config in place and invoke the
# provision agent with the prepared provisioning data.
set -eux
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# TODO should be a way to render configs, in order to do this
# we should have scripts dir variable passed from above
sed -i "s|<ROOT>|${DIR}|" "${DIR}"/templates/agent.config
provision --input_data_file "${DIR}"/templates/provisioning.json --config-file "${DIR}"/templates/agent.config

View File

@ -0,0 +1,6 @@
#!/bin/bash
# `reboot` action: restart the node immediately (e.g. to boot into the
# freshly provisioned system).
set -eux
reboot now

View File

@ -0,0 +1,8 @@
#!/bin/bash
set -eux
# Fake run action which is required in order to make
# dependency `run` -> `provision`
# Intentionally does nothing and always succeeds.
exit 0

View File

@ -0,0 +1,25 @@
# Solar resource definition: a discovered-but-unprovisioned node, driven by
# shell action scripts.
id: not_provisioned_node
handler: shell
version: 1.0.0
actions:
  provision: provision.sh
  # No-op placeholder; exists to anchor the run -> provision dependency.
  run: run.sh
  reboot: reboot.sh
input:
  ip:
    schema: str!
    value:
  # Admin-network MAC address of the node.
  admin_mac:
    schema: str!
    value:
  name:
    schema: str
    value: a node
  location_id:
    schema: str!
    value: $uuid
    # NOTE(review): `reverse: True` sits under location_id -- presumably an
    # input option understood by the resource loader; confirm.
    reverse: True
tags: [resources=node]

View File

@ -0,0 +1,2 @@
[DEFAULT]
# Path to the cloud-init templates used by the provision agent.
# <ROOT> is substituted with the action-script directory by provision.sh
# (sed) before the agent runs.
nc_template_path=<ROOT>/templates/cloud-init-templates/

View File

@ -0,0 +1,55 @@
#cloud-boothook
#!/bin/bash
# First-boot hook (CentOS flavor): disable SELinux, configure the admin
# network interface from template values, pin resolv.conf to the master
# node, blacklist i2c_piix4 on VirtualBox, and tune conntrack/coredump.
cloud-init-per instance disable_selinux_on_the_fly setenforce 0
cloud-init-per instance disable_selinux sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/sysconfig/selinux
# configure udev rules
# udev persistent net
cloud-init-per instance udev_persistent_net1 service network stop
ADMIN_MAC={{ common.admin_mac }}
# Map the admin MAC to its interface name via the udevrules mapping string.
ADMIN_IF=$(echo {{ common.udevrules }} | sed 's/[,=]/\n/g' | grep "$ADMIN_MAC" | cut -d_ -f2 | head -1)
cloud-init-per instance configure_admin_interface /bin/sh -c "echo -e \"# FROM COBBLER SNIPPET\nDEVICE=$ADMIN_IF\nIPADDR={{ common.admin_ip }}\nNETMASK={{ common.admin_mask }}\nBOOTPROTO=none\nONBOOT=yes\nUSERCTL=no\n\" | tee /etc/sysconfig/network-scripts/ifcfg-$ADMIN_IF"
cloud-init-per instance set_gateway /bin/sh -c 'echo GATEWAY="{{ common.gw }}" | tee -a /etc/sysconfig/network'
cloud-init-per instance udev_persistent_net5 service network start
# end of udev
#FIXME(agordeev): if operator updates dns settings on masternode after the node had been provisioned,
# cloud-init will start to generate resolv.conf with non-actual data
cloud-init-per instance resolv_conf_remove rm -f /etc/resolv.conf
cloud-init-per instance resolv_conf_header /bin/sh -c 'echo "# re-generated by cloud-init boothook only at the first boot;" | tee /etc/resolv.conf'
cloud-init-per instance resolv_conf_search /bin/sh -c 'echo "search {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf'
cloud-init-per instance resolv_conf_domain /bin/sh -c 'echo "domain {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf'
cloud-init-per instance resolv_conf_nameserver /bin/sh -c 'echo nameserver {{ common.master_ip }} | tee -a /etc/resolv.conf'
# configure black module lists
# virt-what should be installed
if [ ! -f /etc/modprobe.d/blacklist-i2c_piix4.conf ]; then
([[ $(virt-what) = "virtualbox" ]] && echo "blacklist i2c_piix4" >> /etc/modprobe.d/blacklist-i2c_piix4.conf || :)
modprobe -r i2c_piix4
fi
cloud-init-per instance conntrack_ipv4 /bin/sh -c 'echo nf_conntrack_ipv4 | tee -a /etc/rc.modules'
cloud-init-per instance conntrack_ipv6 /bin/sh -c 'echo nf_conntrack_ipv6 | tee -a /etc/rc.modules'
cloud-init-per instance conntrack_proto_gre /bin/sh -c 'echo nf_conntrack_proto_gre | tee -a /etc/rc.modules'
cloud-init-per instance chmod_rc_modules chmod +x /etc/rc.modules
cloud-init-per instance conntrack_max /bin/sh -c 'echo "net.nf_conntrack_max=1048576" | tee -a /etc/sysctl.conf'
cloud-init-per instance kernel_panic /bin/sh -c 'echo "kernel.panic=60" | tee -a /etc/sysctl.conf'
cloud-init-per instance conntrack_ipv4_load modprobe nf_conntrack_ipv4
cloud-init-per instance conntrack_ipv6_load modprobe nf_conntrack_ipv6
cloud-init-per instance conntrack_proto_gre_load modprobe nf_conntrack_proto_gre
cloud-init-per instance conntrack_max_set sysctl -w "net.nf_conntrack_max=1048576"
cloud-init-per instance kernel_panic_set sysctl -w "kernel.panic=60"
# Collect core dumps in a world-writable directory and raise core limits.
cloud-init-per instance mkdir_coredump mkdir -p /var/log/coredump
cloud-init-per instance set_coredump /bin/sh -c 'echo -e "kernel.core_pattern=/var/log/coredump/core.%e.%p.%h.%t" | tee -a /etc/sysctl.conf'
cloud-init-per instance set_coredump_sysctl sysctl -w "kernel.core_pattern=/var/log/coredump/core.%e.%p.%h.%t"
cloud-init-per instance set_chmod chmod 777 /var/log/coredump
cloud-init-per instance set_limits /bin/sh -c 'echo -e "* soft core unlimited\n* hard core unlimited" | tee -a /etc/security/limits.conf'

View File

@ -0,0 +1,55 @@
#cloud-boothook
#!/bin/bash
# First-boot hook (Ubuntu flavor): configure the admin network interface from
# template values, pin resolv.conf (and resolvconf head) to the master node,
# blacklist i2c_piix4 on VirtualBox, and load conntrack modules.
# udev persistent net
cloud-init-per instance udev_persistent_net1 /etc/init.d/networking stop
ADMIN_MAC={{ common.admin_mac }}
# Map the admin MAC to its interface name via the udevrules mapping string.
ADMIN_IF=$(echo {{ common.udevrules }} | sed 's/[,=]/\n/g' | grep "$ADMIN_MAC" | cut -d_ -f2 | head -1)
# Check if we do not already have static config (or interface seems unconfigured)
if [ ! -d "/etc/network/interfaces.d" ]; then
mkdir -p /etc/network/interfaces.d
echo 'source /etc/network/interfaces.d/*' > /etc/network/interfaces
fi
if [ ! -e "/etc/network/interfaces.d/ifcfg-$ADMIN_IF" ]; then
echo -e "auto $ADMIN_IF\niface $ADMIN_IF inet static\n\taddress {{ common.admin_ip }}\n\tnetmask {{ common.admin_mask }}\n\tgateway {{ common.gw }}" > /etc/network/interfaces.d/ifcfg-"$ADMIN_IF"
fi
cloud-init-per instance udev_persistent_net5 /etc/init.d/networking start
# end of udev
#FIXME(agordeev): if operator updates dns settings on masternode after the node had been provisioned,
# cloud-init will start to generate resolv.conf with non-actual data
cloud-init-per instance resolv_conf_mkdir mkdir -p /etc/resolvconf/resolv.conf.d
cloud-init-per instance resolv_conf_remove rm -f /etc/resolv.conf
cloud-init-per instance resolv_conf_head_remove rm -f /etc/resolvconf/resolv.conf.d/head
cloud-init-per instance resolv_conf_header /bin/sh -c 'echo "# re-generated by cloud-init boothook only at the first boot;" | tee /etc/resolv.conf'
cloud-init-per instance resolv_conf_search /bin/sh -c 'echo "search {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf'
cloud-init-per instance resolv_conf_domain /bin/sh -c 'echo "domain {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf'
cloud-init-per instance resolv_conf_head_header /bin/sh -c 'echo "# re-generated by cloud-init boothook only at the first boot;" | tee /etc/resolvconf/resolv.conf.d/head'
cloud-init-per instance resolv_conf_head_search /bin/sh -c 'echo "search {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolvconf/resolv.conf.d/head'
cloud-init-per instance resolv_conf_head_domain /bin/sh -c 'echo "domain {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolvconf/resolv.conf.d/head'
cloud-init-per instance resolv_conf_nameserver /bin/sh -c 'echo nameserver {{ common.master_ip|replace('"','') }} | tee -a /etc/resolv.conf'
cloud-init-per instance resolv_conf_head_nameserver /bin/sh -c 'echo nameserver {{ common.master_ip|replace('"','') }} | tee -a /etc/resolvconf/resolv.conf.d/head'
# configure black module lists
# virt-what should be installed
if [ ! -f /etc/modprobe.d/blacklist-i2c_piix4.conf ]; then
([[ $(virt-what) = "virtualbox" ]] && echo "blacklist i2c_piix4" >> /etc/modprobe.d/blacklist-i2c_piix4.conf || :) && update-initramfs -u -k all
modprobe -r i2c_piix4
fi
cloud-init-per instance conntrack_ipv4 /bin/sh -c 'echo nf_conntrack_ipv4 | tee -a /etc/modules'
cloud-init-per instance conntrack_ipv6 /bin/sh -c 'echo nf_conntrack_ipv6 | tee -a /etc/modules'
cloud-init-per instance conntrack_proto_gre /bin/sh -c 'echo nf_conntrack_proto_gre | tee -a /etc/modules'
cloud-init-per instance conntrack_max /bin/sh -c 'echo "net.nf_conntrack_max=1048576" | tee -a /etc/sysctl.conf'
cloud-init-per instance kernel_panic /bin/sh -c 'echo "kernel.panic=60" | tee -a /etc/sysctl.conf'
cloud-init-per instance conntrack_ipv4_load modprobe nf_conntrack_ipv4
cloud-init-per instance conntrack_ipv6_load modprobe nf_conntrack_ipv6
cloud-init-per instance conntrack_proto_gre_load modprobe nf_conntrack_proto_gre
cloud-init-per instance conntrack_max_set sysctl -w "net.nf_conntrack_max=1048576"
cloud-init-per instance kernel_panic_set sysctl -w "kernel.panic=60"
cloud-init-per instance dhclient /bin/sh -c 'echo "supersede routers 0;" | tee /etc/dhcp/dhclient.conf'

View File

@ -0,0 +1,26 @@
#cloud-config
# cloud-init user-data template: lock root-fs resizing, disable EC2 metadata,
# and set up key-only SSH access plus locale/timezone/hostname.
resize_rootfs: false
growpart:
  mode: false
disable_ec2_metadata: true
disable_root: false
# password: RANDOM
# chpasswd: { expire: True }
# Password SSH auth is off; access is via the injected authorized keys below.
ssh_pwauth: false
ssh_authorized_keys:
{% for key in common.ssh_auth_keys %}
  - {{ key }}
{% endfor %}
# set the locale to a given locale
# default: en_US.UTF-8
locale: en_US.UTF-8
timezone: {{ common.timezone }}
hostname: {{ common.hostname }}
fqdn: {{ common.fqdn }}
final_message: "YAY! The system is finally up, after $UPTIME seconds"

View File

@ -0,0 +1,25 @@
#cloud-config
# cloud-init user-data template (variant with a fixed root password for
# console access; SSH password auth remains disabled).
resize_rootfs: false
growpart:
  mode: false
disable_ec2_metadata: true
disable_root: false
# NOTE(review): hard-coded root password -- acceptable for dev images only.
user: root
password: r00tme
chpasswd: { expire: false }
ssh_pwauth: false
ssh_authorized_keys:
{% for key in common.ssh_auth_keys %}
  - {{ key }}
{% endfor %}
# set the locale to a given locale
# default: en_US.UTF-8
locale: en_US.UTF-8
timezone: {{ common.timezone }}
hostname: {{ common.hostname }}
fqdn: {{ common.fqdn }}
final_message: "YAY! The system is finally up, after $UPTIME seconds"

View File

@ -0,0 +1,11 @@
# cloud-init meta-data template: only the hostname is set here.
# NOTE(review): the network-interfaces section is commented out -- presumably
# networking is configured elsewhere (boothook); confirm.
# instance-id will be autogenerated
# instance-id: iid-abcdefg
#network-interfaces: |
#  auto {{ common.admin_iface_name|default("eth0") }}
#  iface {{ common.admin_iface_name|default("eth0") }} inet static
#  address {{ common.admin_ip }}
#  # network 192.168.1.0
#  netmask {{ common.admin_mask }}
#  # broadcast 192.168.1.255
#  # gateway 192.168.1.254
hostname: {{ common.hostname }}

View File

@ -0,0 +1,11 @@
# cloud-init meta-data template: only the hostname is set here.
# NOTE(review): the network-interfaces section is commented out -- presumably
# networking is configured elsewhere (boothook); confirm.
# instance-id will be autogenerated
# instance-id: iid-abcdefg
#network-interfaces: |
#  auto {{ common.admin_iface_name|default("eth0") }}
#  iface {{ common.admin_iface_name|default("eth0") }} inet static
#  address {{ common.admin_ip }}
#  # network 192.168.1.0
#  netmask {{ common.admin_mask }}
#  # broadcast 192.168.1.255
#  # gateway 192.168.1.254
hostname: {{ common.hostname }}

View File

@ -0,0 +1,220 @@
{
"profile": "ubuntu_1404_x86_64",
"name_servers_search": "\"example.com\"",
"uid": "2",
"interfaces": {
"eth1": {
"static": "0",
"mac_address": "08:00:27:6e:6d:b4"
},
"eth0": {
"ip_address": "10.0.2.15",
"dns_name": "node-8.test.domain.local",
"netmask": "255.255.255.0",
"static": "0",
"mac_address": "08:00:27:ea:35:e7"
}
},
"interfaces_extra": {
"eth1": {
"onboot": "no",
"peerdns": "no"
},
"eth0": {
"onboot": "no",
"peerdns": "no"
}
},
"power_type": "ssh",
"power_user": "root",
"kernel_options": {
"udevrules": "08:00:27:6e:6d:b4_eth1,08:00:27:ea:35:e7_eth0",
"netcfg/choose_interface": "08:00:27:ea:35:e7"
},
"power_address": "10.20.0.1",
"name_servers": "\"127.0.0.1\"",
"ks_meta": {
"gw": "10.20.0.1",
"mco_enable": 1,
"mco_vhost": "mcollective",
"repo_setup": {
"installer_kernel": {
"local": "/var/www/nailgun/ubuntu/x86_64/images/linux",
"remote_relative": "dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/linux"
},
"repos": [
{
"name": "ubuntu",
"section": "main universe multiverse",
"uri": "http://archive.ubuntu.com/ubuntu/",
"priority": null,
"suite": "trusty",
"type": "deb"
},
{
"name": "ubuntu-updates",
"section": "main universe multiverse",
"uri": "http://archive.ubuntu.com/ubuntu/",
"priority": null,
"suite": "trusty-updates",
"type": "deb"
},
{
"name": "ubuntu-security",
"section": "main universe multiverse",
"uri": "http://archive.ubuntu.com/ubuntu/",
"priority": null,
"suite": "trusty-security",
"type": "deb"
},
{
"name": "mos",
"section": "main restricted",
"uri": "http://127.0.0.1:8080/2015.1.0-7.0/ubuntu/x86_64",
"priority": 1050,
"suite": "mos7.0",
"type": "deb"
},
{
"name": "mos-updates",
"section": "main restricted",
"uri": "http://mirror.fuel-infra.org/mos/ubuntu/",
"priority": 1050,
"suite": "mos7.0-updates",
"type": "deb"
},
{
"name": "mos-security",
"section": "main restricted",
"uri": "http://mirror.fuel-infra.org/mos/ubuntu/",
"priority": 1050,
"suite": "mos7.0-security",
"type": "deb"
},
{
"name": "mos-holdback",
"section": "main restricted",
"uri": "http://mirror.fuel-infra.org/mos/ubuntu/",
"priority": 1100,
"suite": "mos7.0-holdback",
"type": "deb"
},
{
"name": "Auxiliary",
"section": "main restricted",
"uri": "http://127.0.0.1:8080/2015.1.0-7.0/ubuntu/auxiliary",
"priority": 1150,
"suite": "auxiliary",
"type": "deb"
}
],
"metadata": {
"always_editable": true,
"weight": 50,
"label": "Repositories"
},
"installer_initrd": {
"local": "/var/www/nailgun/ubuntu/x86_64/images/initrd.gz",
"remote_relative": "dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/initrd.gz"
}
},
"authorized_keys": [],
"mlnx_iser_enabled": false,
"mco_pskey": "Gie6iega9ohngaenahthohngu8aebohxah9seidi",
"mco_user": "guest",
"puppet_enable": 0,
"fuel_version": "6.1",
"install_log_2_syslog": 1,
"image_data": {
"/boot": {
"container": "gzip",
"uri": "http://10.0.0.2:8001/tmp/targetimages/env_3_ubuntu_1404_amd64-boot.img.gz",
"format": "ext2"
},
"/": {
"container": "gzip",
"uri": "http://10.0.0.2:8001/tmp/targetimages/env_3_ubuntu_1404_amd64.img.gz",
"format": "ext4"
}
},
"timezone": "Etc/UTC",
"puppet_auto_setup": 1,
"puppet_master": "localhost",
"mco_auto_setup": 1,
"mco_password": "guest",
"auth_key": "\"\"",
"pm_data": {
"kernel_params": "console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90 nomodeset",
"ks_spaces": [
{
"name": "sda",
"extra": [],
"free_space": 304617,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"type": "raid",
"file_system": "ext2",
"name": "Boot",
"size": 200
},
{
"type": "lvm_meta_pool",
"size": 0
},
{
"vg": "os",
"type": "pv",
"lvm_meta_size": 64,
"size": 20000,
"orig_size": 59456
}
],
"type": "disk",
"id": "sda",
"size": 42800,
"orig_size": 305245
},
{
"_allocate_size": "min",
"label": "Base System",
"min_size": 19936,
"orig_min_size": 59392,
"volumes": [
{
"mount": "/",
"size": 11744,
"type": "lv",
"name": "root",
"file_system": "ext4"
},
{
"mount": "swap",
"size": 8192,
"type": "lv",
"name": "swap",
"file_system": "swap"
}
],
"type": "vg",
"id": "os"
}
]
},
"mlnx_plugin_mode": "disabled",
"master_ip": "127.0.0.1",
"mco_connector": "rabbitmq",
"mlnx_vf_num": "16",
"admin_net": "10.20.0.0/24",
"mco_host": "localhost"
},
"name": "node-2",
"hostname": "node-2.example.com",
"slave_name": "node-2",
"power_pass": "/root/.ssh/bootstrap.rsa",
"netboot_enabled": "1"
}

View File

@ -0,0 +1,10 @@
# Fetch {{remote_path}} from {{remote_ip}} into {{dest}} using the first
# configured ssh transport; exit 2 if no suitable transport exists.
mkdir -p {{dest}}
{% for transport in remote %}
{% if transport.name == 'ssh' %}
scp -i {{transport.key}} -r {{transport.user}}@{{remote_ip}}:/{{remote_path}} {{dest}}
exit 0
{% endif %}
{% endfor %}
echo 'No suitable transport.'
exit 2

View File

@ -0,0 +1,20 @@
# Solar resource definition: copy a file/directory from a remote host.
id: remote_file
handler: shell
version: 1.0.0
input:
  ip:
    schema: str!
    value:
  # Transport descriptions used to reach the remote host (see the fetch script).
  remote:
    schema: {}
    value:
  # Source host address.
  remote_ip:
    schema: str!
    value:
  # Path on the remote host to copy from.
  remote_path:
    schema: str!
    value:
  # Local destination directory.
  dest:
    schema: str!
    value:
tags: []

View File

@ -96,8 +96,18 @@ class React(Event):
if self.parent_node in changes_graph:
if self.child_node not in changes_graph:
# TODO: solve this circular import problem
from solar.core import resource
try:
loaded_resource = resource.load(self.parent)
except KeyError:
# orm throws this error when we're NOT using resource there
location_id = None
else:
location_id = loaded_resource.args['location_id']
changes_graph.add_node(
self.child_node, status='PENDING',
target=location_id,
errmsg=None, type='solar_resource',
args=[self.child, self.child_action])
@ -112,7 +122,17 @@ class StateChange(Event):
def insert(self, changed_resources, changes_graph):
changed_resources.append(self.parent)
# TODO: solve this circular import problem
from solar.core import resource
try:
loaded_resource = resource.load(self.parent)
except KeyError:
# orm throws this error when we're NOT using resource there
location_id = None
else:
location_id = loaded_resource.args['location_id']
changes_graph.add_node(
self.parent_node, status='PENDING',
target=location_id,
errmsg=None, type='solar_resource',
args=[self.parent, self.parent_action])

View File

@ -72,5 +72,5 @@ def target_based_rule(dg, inprogress, item, limit=1):
return limit > target_count
def items_rule(dg, inprogress, item, limit=1):
def items_rule(dg, inprogress, item, limit=100):
return len(inprogress) < limit

View File

@ -0,0 +1,43 @@
# Jinja template generating Solar resources for every discovered node:
# an ssh transport, a transports aggregate wired to it, and the node itself,
# plus a read-only master node reachable via the vagrant key.
id: not_provisioned_nodes
resources:
{% for node in nodes %}
{% set mac = node.mac | replace(':', '_') %}
  - id: ssh_transport{{ mac }}
    from: resources/transport_ssh
    values:
      ssh_user: 'root'
      ssh_key: '/vagrant/tmp/keys/ssh_private'
  - id: transports{{mac}}
    from: resources/transports
    values:
      transports:key: ssh_transport{{mac}}::ssh_key
      transports:user: ssh_transport{{mac}}::ssh_user
      transports:port: ssh_transport{{mac}}::ssh_port
      transports:name: ssh_transport{{mac}}::name
  - id: node_{{mac}}
    from: resources/not_provisioned_node
    values:
      ip: {{node.ip}}
      transports_id: transports{{mac}}::transports_id
      name: node_{{mac}}
      admin_mac: {{node.mac}}
{% endfor %}
  - id: ssh_transport_master
    from: resources/transport_ssh
    values:
      ssh_user: 'vagrant'
      ssh_key: '/vagrant/.vagrant/machines/solar-dev/virtualbox/private_key'
  - id: transports_master
    from: resources/transports
    values:
      transports:key: ssh_transport_master::ssh_key
      transports:user: ssh_transport_master::ssh_user
      transports:port: ssh_transport_master::ssh_port
      transports:name: ssh_transport_master::name
  - id: node_master
    from: resources/ro_node
    values:
      name: node_master
      ip: '10.0.2.15'
      transports_id: transports_master::transports_id