From 2d57c49be362821142936d095ef359712538f1f8 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Mon, 12 Oct 2015 17:57:34 +0300 Subject: [PATCH 01/15] Add remote file resource which will download keys --- examples/library_ceph/ceph.py | 8 +++++++- resources/ceph_keys/meta.yaml | 3 +++ resources/remote_file/actions/run.sh | 3 +++ resources/remote_file/meta.yaml | 23 +++++++++++++++++++++++ 4 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 resources/remote_file/actions/run.sh create mode 100644 resources/remote_file/meta.yaml diff --git a/examples/library_ceph/ceph.py b/examples/library_ceph/ceph.py index b48ade0d..b6a0e579 100644 --- a/examples/library_ceph/ceph.py +++ b/examples/library_ceph/ceph.py @@ -37,12 +37,18 @@ def deploy(): db.clear() resources = vr.create('nodes', 'templates/nodes.yaml', {'count': 1}) first_node = next(x for x in resources if x.name.startswith('node')) - + ssh = next(x for x in resources if x.name.startswith('ssh')) library = vr.create('library1', 'resources/fuel_library', {})[0] first_node.connect(library) keys = vr.create('ceph_key', 'resources/ceph_keys', {})[0] + remote_file = vr.create('ceph_key2', 'resources/remote_file', + {'dest': '/var/lib/astute/'})[0] first_node.connect(keys) + keys.connect(remote_file, {'ip': 'remote_ip', 'path': 'remote_path'}) + ssh.connect(remote_file, + {'ssh_key': 'remote_key', 'ssh_user': 'remote_user'}) + first_node.connect(remote_file) ceph_mon = vr.create('ceph_mon1', 'resources/ceph_mon', {'storage': STORAGE, diff --git a/resources/ceph_keys/meta.yaml b/resources/ceph_keys/meta.yaml index 4a61d4f6..43a2d25a 100644 --- a/resources/ceph_keys/meta.yaml +++ b/resources/ceph_keys/meta.yaml @@ -11,4 +11,7 @@ input: key_name: schema: str! value: ceph + path: + schema: str! 
+ value: /var/lib/astute/ceph/ tags: [] diff --git a/resources/remote_file/actions/run.sh b/resources/remote_file/actions/run.sh new file mode 100644 index 00000000..cb2cb39e --- /dev/null +++ b/resources/remote_file/actions/run.sh @@ -0,0 +1,3 @@ +mkdir -p {{remote_path}} + +scp -i {{remote_key}} -r {{remote_user}}@{{remote_ip}}:/{{remote_path}} {{dest}} diff --git a/resources/remote_file/meta.yaml b/resources/remote_file/meta.yaml new file mode 100644 index 00000000..1b4bed9f --- /dev/null +++ b/resources/remote_file/meta.yaml @@ -0,0 +1,23 @@ +id: remote_file +handler: shell +version: 1.0.0 +input: + ip: + schema: str! + value: + remote_ip: + schema: str! + value: + remote_user: + schema: {} + value: + remote_path: + schema: str! + value: + remote_key: + schema: str! + value: + dest: + schema: str! + value: +tags: [] From 588bfe83f8cc26f0a71d34233f71497e82214207 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 13 Oct 2015 11:44:44 +0300 Subject: [PATCH 02/15] Change remote_file to use transports --- examples/library_ceph/ceph.py | 16 +++++++++------- resources/remote_file/actions/run.sh | 11 +++++++++-- resources/remote_file/meta.yaml | 9 +++------ 3 files changed, 21 insertions(+), 15 deletions(-) diff --git a/examples/library_ceph/ceph.py b/examples/library_ceph/ceph.py index b6a0e579..2529c335 100644 --- a/examples/library_ceph/ceph.py +++ b/examples/library_ceph/ceph.py @@ -35,20 +35,22 @@ NETWORK_METADATA = yaml.load(""" def deploy(): db.clear() - resources = vr.create('nodes', 'templates/nodes.yaml', {'count': 1}) - first_node = next(x for x in resources if x.name.startswith('node')) - ssh = next(x for x in resources if x.name.startswith('ssh')) + resources = vr.create('nodes', 'templates/nodes.yaml', {'count': 2}) + first_node, second_node = [x for x in resources if x.name.startswith('node')] + first_transp = next(x for x in resources if x.name.startswith('transport')) + library = vr.create('library1', 'resources/fuel_library', {})[0] 
first_node.connect(library) keys = vr.create('ceph_key', 'resources/ceph_keys', {})[0] + first_node.connect(keys) + remote_file = vr.create('ceph_key2', 'resources/remote_file', {'dest': '/var/lib/astute/'})[0] - first_node.connect(keys) + second_node.connect(remote_file) keys.connect(remote_file, {'ip': 'remote_ip', 'path': 'remote_path'}) - ssh.connect(remote_file, - {'ssh_key': 'remote_key', 'ssh_user': 'remote_user'}) - first_node.connect(remote_file) + first_transp.connect(remote_file, {'transports': 'remote'}) + ceph_mon = vr.create('ceph_mon1', 'resources/ceph_mon', {'storage': STORAGE, diff --git a/resources/remote_file/actions/run.sh b/resources/remote_file/actions/run.sh index cb2cb39e..212bdfb4 100644 --- a/resources/remote_file/actions/run.sh +++ b/resources/remote_file/actions/run.sh @@ -1,3 +1,10 @@ -mkdir -p {{remote_path}} +mkdir -p {{dest}} -scp -i {{remote_key}} -r {{remote_user}}@{{remote_ip}}:/{{remote_path}} {{dest}} +{% for transport in remote %} + {% if transport.name == 'ssh' %} +scp -i {{transport.key}} -r {{transport.user}}@{{remote_ip}}:/{{remote_path}} {{dest}} +exit 0 + {% endif %} +{% endfor %} +echo 'No suitable transport.' +exit 2 diff --git a/resources/remote_file/meta.yaml b/resources/remote_file/meta.yaml index 1b4bed9f..6e319bdb 100644 --- a/resources/remote_file/meta.yaml +++ b/resources/remote_file/meta.yaml @@ -5,18 +5,15 @@ input: ip: schema: str! value: + remote: + schema: {} + value: remote_ip: schema: str! value: - remote_user: - schema: {} - value: remote_path: schema: str! value: - remote_key: - schema: str! - value: dest: schema: str! value: From c029035ea96f30551aab81cb033c6e510f2beffe Mon Sep 17 00:00:00 2001 From: Sebastian Kalinowski Date: Tue, 20 Oct 2015 12:24:21 +0200 Subject: [PATCH 03/15] Force mac address updates for slaves When using VirtualBox as a provider, "slaves" had the same MAC as first network adapter in "master", which was causing issues with networking. Now MAC will be randomly created. 
--- Vagrantfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Vagrantfile b/Vagrantfile index fe97b07f..d537996f 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -151,6 +151,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| "--memory", SLAVES_RAM, "--cpus", SLAVES_CPUS, "--ioapic", "on", + "--macaddress1", "auto", ] if PARAVIRT_PROVIDER v.customize ['modifyvm', :id, "--paravirtprovider", PARAVIRT_PROVIDER] # for linux guest From 64a075aad7d339b3f36ecb2b627ca052debf953d Mon Sep 17 00:00:00 2001 From: Evgeniy L Date: Mon, 19 Oct 2015 20:13:41 +0300 Subject: [PATCH 04/15] Add node resource which can provisioned using solar --- bootstrap/playbooks/files/dnsmasq_pxe.conf | 2 +- .../playbooks/files/nginx_vagrant_dir.cfg | 8 + bootstrap/playbooks/pxe.yaml | 2 + examples/provisioning/provision.py | 38 +++ examples/provisioning/provision.sh | 14 ++ .../dnsmasq/actions/exclude_mac_pxe.yaml | 6 + resources/dnsmasq/actions/run.yaml | 2 + resources/dnsmasq/meta.yaml | 18 ++ .../not_provisioned_node/actions/provision.sh | 9 + .../not_provisioned_node/actions/reboot.sh | 6 + resources/not_provisioned_node/actions/run.sh | 6 + resources/not_provisioned_node/meta.yaml | 25 ++ .../templates/agent.config | 2 + .../boothook_centos.jinja2 | 109 +++++++++ .../boothook_ubuntu.jinja2 | 96 ++++++++ .../cloud_config_centos.jinja2 | 104 +++++++++ .../cloud_config_ubuntu.jinja2 | 103 ++++++++ .../meta-data_centos.jinja2 | 11 + .../meta-data_ubuntu.jinja2 | 11 + .../templates/provisioning.json | 220 ++++++++++++++++++ templates/not_provisioned_nodes.yaml | 42 ++++ 21 files changed, 833 insertions(+), 1 deletion(-) create mode 100644 bootstrap/playbooks/files/nginx_vagrant_dir.cfg create mode 100755 examples/provisioning/provision.py create mode 100755 examples/provisioning/provision.sh create mode 100644 resources/dnsmasq/actions/exclude_mac_pxe.yaml create mode 100644 resources/dnsmasq/actions/run.yaml create mode 100644 resources/dnsmasq/meta.yaml create mode 100644 
resources/not_provisioned_node/actions/provision.sh create mode 100644 resources/not_provisioned_node/actions/reboot.sh create mode 100644 resources/not_provisioned_node/actions/run.sh create mode 100644 resources/not_provisioned_node/meta.yaml create mode 100644 resources/not_provisioned_node/templates/agent.config create mode 100644 resources/not_provisioned_node/templates/cloud-init-templates/boothook_centos.jinja2 create mode 100644 resources/not_provisioned_node/templates/cloud-init-templates/boothook_ubuntu.jinja2 create mode 100644 resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_centos.jinja2 create mode 100644 resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_ubuntu.jinja2 create mode 100644 resources/not_provisioned_node/templates/cloud-init-templates/meta-data_centos.jinja2 create mode 100644 resources/not_provisioned_node/templates/cloud-init-templates/meta-data_ubuntu.jinja2 create mode 100644 resources/not_provisioned_node/templates/provisioning.json create mode 100644 templates/not_provisioned_nodes.yaml diff --git a/bootstrap/playbooks/files/dnsmasq_pxe.conf b/bootstrap/playbooks/files/dnsmasq_pxe.conf index 05e9e45f..e1a04f9e 100644 --- a/bootstrap/playbooks/files/dnsmasq_pxe.conf +++ b/bootstrap/playbooks/files/dnsmasq_pxe.conf @@ -6,7 +6,7 @@ bind-interfaces dhcp-range={{dhcp_range_start}},{{dhcp_range_end}},12h # Net boot file name -dhcp-boot=tag:!nopxe,pxelinux.0 +dhcp-boot=net:!nopxe,pxelinux.0 # Configure tftp enable-tftp diff --git a/bootstrap/playbooks/files/nginx_vagrant_dir.cfg b/bootstrap/playbooks/files/nginx_vagrant_dir.cfg new file mode 100644 index 00000000..132aa811 --- /dev/null +++ b/bootstrap/playbooks/files/nginx_vagrant_dir.cfg @@ -0,0 +1,8 @@ +server { + listen 8001; + root /vagrant; + + location / { + autoindex on; + } +} diff --git a/bootstrap/playbooks/pxe.yaml b/bootstrap/playbooks/pxe.yaml index 08853539..4f9e13eb 100644 --- a/bootstrap/playbooks/pxe.yaml +++ 
b/bootstrap/playbooks/pxe.yaml @@ -47,4 +47,6 @@ # Configure http server to load root - apt: name=nginx state=present - template: src=files/nginx.cfg dest=/etc/nginx/conf.d/pxe_image.conf + # Configure http server in order serve file in '/vagrant' directory + - template: src=files/nginx_vagrant_dir.cfg dest=/etc/nginx/conf.d/vagrant_dir.conf - service: name=nginx state=restarted diff --git a/examples/provisioning/provision.py b/examples/provisioning/provision.py new file mode 100755 index 00000000..9f83d5d3 --- /dev/null +++ b/examples/provisioning/provision.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python + +import requests + +from solar.core import resource +from solar.core import signals +from solar.core import validation +from solar.core.resource import virtual_resource as vr + +from solar.events.controls import React +from solar.events.api import add_event + + +discovery_service = 'http://0.0.0.0:8881' + +# DEBUG use a single node +nodes_list = [requests.get(discovery_service).json()[0]] + + +# Create slave node resources +node_resources = vr.create('nodes', 'templates/not_provisioned_nodes.yaml', {'nodes': nodes_list}) + +# Get master node +master_node = filter(lambda n: n.name == 'node_master', node_resources)[0] + +# Dnsmasq resources +for node in nodes_list: + dnsmasq = vr.create('dnsmasq_{0}'.format(node['mac'].replace(':', '_')), 'resources/dnsmasq', {})[0] + node = filter(lambda n: n.name.endswith('node{0}'.format(node['mac']).replace(':', '_')), node_resources)[0] + master_node.connect(dnsmasq) + node.connect(dnsmasq, {'admin_mac': 'exclude_mac_pxe'}) + + event = React(node.name, 'run', 'success', node.name, 'provision') + add_event(event) + event = React(node.name, 'provision', 'success', dnsmasq.name, 'exclude_mac_pxe') + add_event(event) + event = React(dnsmasq.name, 'exclude_mac_pxe', 'success', node.name, 'reboot') + add_event(event) diff --git a/examples/provisioning/provision.sh b/examples/provisioning/provision.sh new file mode 100755 index 
00000000..c7265890 --- /dev/null +++ b/examples/provisioning/provision.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +set -eux + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +solar resource clear_all +python "${DIR}"/provision.py + +solar changes stage +solar changes process +solar orch run-once last +watch --color -n1 'solar orch report last' + diff --git a/resources/dnsmasq/actions/exclude_mac_pxe.yaml b/resources/dnsmasq/actions/exclude_mac_pxe.yaml new file mode 100644 index 00000000..91b16416 --- /dev/null +++ b/resources/dnsmasq/actions/exclude_mac_pxe.yaml @@ -0,0 +1,6 @@ +- hosts: [{{host}}] + sudo: yes + + tasks: + - lineinfile: create=yes dest=/etc/dnsmasq.d/no_pxe_{{exclude_mac_pxe | replace(':', '_')}}.conf line="dhcp-host={{exclude_mac_pxe}},set:nopxe" + - shell: service dnsmasq restart diff --git a/resources/dnsmasq/actions/run.yaml b/resources/dnsmasq/actions/run.yaml new file mode 100644 index 00000000..9c29505e --- /dev/null +++ b/resources/dnsmasq/actions/run.yaml @@ -0,0 +1,2 @@ +- hosts: [{{host}}] + sudo: yes diff --git a/resources/dnsmasq/meta.yaml b/resources/dnsmasq/meta.yaml new file mode 100644 index 00000000..213056ad --- /dev/null +++ b/resources/dnsmasq/meta.yaml @@ -0,0 +1,18 @@ +id: dnsmasq +handler: ansible +version: 1.0.0 + +actions: + exclude_mac_pxe: exclude_mac_pxe.yaml + run: run.yaml + +input: + ip: + schema: str! + value: + + exclude_mac_pxe: + schema: str! 
+ value: + +tags: [resources=dnsmasq] diff --git a/resources/not_provisioned_node/actions/provision.sh b/resources/not_provisioned_node/actions/provision.sh new file mode 100644 index 00000000..ccbdb45c --- /dev/null +++ b/resources/not_provisioned_node/actions/provision.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +set -eux +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +# TODO should be a way to render configs, in order to do this +# we should have scripts dir variable passed from above +sed -i "s||${DIR}|" "${DIR}"/templates/agent.config +provision --input_data_file "${DIR}"/templates/provisioning.json --config-file "${DIR}"/templates/agent.config diff --git a/resources/not_provisioned_node/actions/reboot.sh b/resources/not_provisioned_node/actions/reboot.sh new file mode 100644 index 00000000..fc028c2e --- /dev/null +++ b/resources/not_provisioned_node/actions/reboot.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +set -eux + +reboot now + diff --git a/resources/not_provisioned_node/actions/run.sh b/resources/not_provisioned_node/actions/run.sh new file mode 100644 index 00000000..1ea0ed55 --- /dev/null +++ b/resources/not_provisioned_node/actions/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +set -eux + +# Fake run action which is required in order to make +# dependency `run` -> `provision` diff --git a/resources/not_provisioned_node/meta.yaml b/resources/not_provisioned_node/meta.yaml new file mode 100644 index 00000000..7d9a6686 --- /dev/null +++ b/resources/not_provisioned_node/meta.yaml @@ -0,0 +1,25 @@ +id: not_provisioned_node +handler: shell +version: 1.0.0 + +actions: + provision: provision.sh + run: run.sh + reboot: reboot.sh + +input: + ip: + schema: str! + value: + admin_mac: + schema: str! + value: + name: + schema: str + value: a node + location_id: + schema: str! 
+ value: $uuid + reverse: True + +tags: [resources=node] diff --git a/resources/not_provisioned_node/templates/agent.config b/resources/not_provisioned_node/templates/agent.config new file mode 100644 index 00000000..d51d09a3 --- /dev/null +++ b/resources/not_provisioned_node/templates/agent.config @@ -0,0 +1,2 @@ +[DEFAULT] +nc_template_path=/templates/cloud-init-templates/ diff --git a/resources/not_provisioned_node/templates/cloud-init-templates/boothook_centos.jinja2 b/resources/not_provisioned_node/templates/cloud-init-templates/boothook_centos.jinja2 new file mode 100644 index 00000000..732bf752 --- /dev/null +++ b/resources/not_provisioned_node/templates/cloud-init-templates/boothook_centos.jinja2 @@ -0,0 +1,109 @@ +#cloud-boothook +#!/bin/bash + +function add_str_to_file_if_not_exists { + file=$1 + str=$2 + val=$3 + if ! grep -q "^ *${str}" $file; then + echo $val >> $file + fi +} + + +cloud-init-per instance disable_selinux_on_the_fly setenforce 0 + +cloud-init-per instance disable_selinux sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/sysconfig/selinux + + +# configure udev rules + +# udev persistent net +cloud-init-per instance udev_persistent_net1 service network stop + +ADMIN_MAC={{ common.admin_mac }} +ADMIN_IF=$(echo {{ common.udevrules }} | sed 's/[,=]/\n/g' | grep "$ADMIN_MAC" | cut -d_ -f2 | head -1) +cloud-init-per instance configure_admin_interface /bin/sh -c "echo -e \"# FROM COBBLER SNIPPET\nDEVICE=$ADMIN_IF\nIPADDR={{ common.admin_ip }}\nNETMASK={{ common.admin_mask }}\nBOOTPROTO=none\nONBOOT=yes\nUSERCTL=no\n\" | tee /etc/sysconfig/network-scripts/ifcfg-$ADMIN_IF" + +cloud-init-per instance set_gateway /bin/sh -c 'echo GATEWAY="{{ common.gw }}" | tee -a /etc/sysconfig/network' + +cloud-init-per instance udev_persistent_net5 service network start + +# end of udev + +#FIXME(agordeev): if operator updates dns settings on masternode after the node had been provisioned, +# cloud-init will start to generate resolv.conf with non-actual data 
+cloud-init-per instance resolv_conf_remove rm -f /etc/resolv.conf +cloud-init-per instance resolv_conf_header /bin/sh -c 'echo "# re-generated by cloud-init boothook only at the first boot;" | tee /etc/resolv.conf' +cloud-init-per instance resolv_conf_search /bin/sh -c 'echo "search {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf' +cloud-init-per instance resolv_conf_domain /bin/sh -c 'echo "domain {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf' +cloud-init-per instance resolv_conf_nameserver /bin/sh -c 'echo nameserver {{ common.master_ip }} | tee -a /etc/resolv.conf' + +# configure black module lists +# virt-what should be installed +if [ ! -f /etc/modprobe.d/blacklist-i2c_piix4.conf ]; then + ([[ $(virt-what) = "virtualbox" ]] && echo "blacklist i2c_piix4" >> /etc/modprobe.d/blacklist-i2c_piix4.conf || :) + modprobe -r i2c_piix4 +fi + +cloud-init-per instance conntrack_ipv4 /bin/sh -c 'echo nf_conntrack_ipv4 | tee -a /etc/rc.modules' +cloud-init-per instance conntrack_ipv6 /bin/sh -c 'echo nf_conntrack_ipv6 | tee -a /etc/rc.modules' +cloud-init-per instance conntrack_proto_gre /bin/sh -c 'echo nf_conntrack_proto_gre | tee -a /etc/rc.modules' +cloud-init-per instance chmod_rc_modules chmod +x /etc/rc.modules +cloud-init-per instance conntrack_max /bin/sh -c 'echo "net.nf_conntrack_max=1048576" | tee -a /etc/sysctl.conf' +cloud-init-per instance kernel_panic /bin/sh -c 'echo "kernel.panic=60" | tee -a /etc/sysctl.conf' + +cloud-init-per instance conntrack_ipv4_load modprobe nf_conntrack_ipv4 +cloud-init-per instance conntrack_ipv6_load modprobe nf_conntrack_ipv6 +cloud-init-per instance conntrack_proto_gre_load modprobe nf_conntrack_proto_gre +cloud-init-per instance conntrack_max_set sysctl -w "net.nf_conntrack_max=1048576" +cloud-init-per instance kernel_panic_set sysctl -w "kernel.panic=60" + +cloud-init-per instance mkdir_coredump mkdir -p /var/log/coredump +cloud-init-per instance set_coredump /bin/sh -c 'echo -e 
"kernel.core_pattern=/var/log/coredump/core.%e.%p.%h.%t" | tee -a /etc/sysctl.conf' +cloud-init-per instance set_coredump_sysctl sysctl -w "kernel.core_pattern=/var/log/coredump/core.%e.%p.%h.%t" +cloud-init-per instance set_chmod chmod 777 /var/log/coredump +cloud-init-per instance set_limits /bin/sh -c 'echo -e "* soft core unlimited\n* hard core unlimited" | tee -a /etc/security/limits.conf' + + +#NOTE: disabled for centos? +#cloud-init-per instance dhclient echo 'supersede routers 0;' | tee /etc/dhcp/dhclient.conf + +# ntp sync +# '| tee /dev/null' is needed for returning zero execution code always +cloud-init-per instance stop_ntpd /bin/sh -c 'service ntpd stop | tee /dev/null' +cloud-init-per instance sync_date ntpdate -t 4 -b {{ common.master_ip }} +cloud-init-per instance sync_hwclock hwclock --systohc + +cloud-init-per instance edit_ntp_conf1 sed -i '/^\s*tinker panic/ d' /etc/ntp.conf +cloud-init-per instance edit_ntp_conf2 sed -i '1 i tinker panic 0' /etc/ntp.conf +cloud-init-per instance edit_ntp_conf_mkdir mkdir -p /var/lib/ntp +cloud-init-per instance edit_ntp_conf3 /bin/sh -c 'echo 0 | tee /var/lib/ntp/drift' +cloud-init-per instance edit_ntp_conf4 chown ntp: /var/lib/ntp/drift +cloud-init-per instance edit_ntp_conf5 sed -i '/^\s*server/ d' /etc/ntp.conf +cloud-init-per instance edit_ntp_conf6 /bin/sh -c 'echo "server {{ common.master_ip }} burst iburst" | tee -a /etc/ntp.conf' + + +# Point installed ntpd to Master node +cloud-init-per instance set_ntpdate sed -i 's/SYNC_HWCLOCK\s*=\s*no/SYNC_HWCLOCK=yes/' /etc/sysconfig/ntpdate +cloud-init-per instance set_ntpd_0 chkconfig ntpd on +cloud-init-per instance set_ntpd_1 chkconfig ntpdate on +cloud-init-per instance start_ntpd service ntpd start + +cloud-init-per instance removeUseDNS sed -i --follow-symlinks -e '/UseDNS/d' /etc/ssh/sshd_config +add_str_to_file_if_not_exists /etc/ssh/sshd_config 'UseDNS' 'UseDNS no' + +cloud-init-per instance gssapi_disable sed -i -e "/^\s*GSSAPICleanupCredentials yes/d" 
-e "/^\s*GSSAPIAuthentication yes/d" /etc/ssh/sshd_config + +cloud-init-per instance nailgun_agent_0 /bin/sh -c 'echo "rm -f /etc/nailgun-agent/nodiscover" | tee /etc/rc.local' +cloud-init-per instance nailgun_agent_1 /bin/sh -c 'echo "flock -w 0 -o /var/lock/agent.lock -c \"/opt/nailgun/bin/agent >> /var/log/nailgun-agent.log 2>&1\"" | tee -a /etc/rc.local' + +# Copying default bash settings to the root directory +cloud-init-per instance skel_bash cp -f /etc/skel/.bash* /root/ + +# Puppet config +cloud-init-per instance hiera_puppet mkdir -p /etc/puppet /var/lib/hiera +cloud-init-per instance touch_puppet touch /var/lib/hiera/common.yaml /etc/puppet/hiera.yaml + +# Mcollective enable +cloud-init-per instance mcollective_enable sed -i /etc/rc.d/init.d/mcollective -e 's/\(# chkconfig:\s\+[-0-6]\+\) [0-9]\+ \([0-9]\+\)/\1 81 \2/' diff --git a/resources/not_provisioned_node/templates/cloud-init-templates/boothook_ubuntu.jinja2 b/resources/not_provisioned_node/templates/cloud-init-templates/boothook_ubuntu.jinja2 new file mode 100644 index 00000000..e3c7dd91 --- /dev/null +++ b/resources/not_provisioned_node/templates/cloud-init-templates/boothook_ubuntu.jinja2 @@ -0,0 +1,96 @@ +#cloud-boothook +#!/bin/bash + +function add_str_to_file_if_not_exists { + file=$1 + str=$2 + val=$3 + if ! grep -q "^ *${str}" $file; then + echo $val >> $file + fi +} + +cloud-init-per instance wipe_sources_list_templates /bin/sh -c 'echo | tee /etc/cloud/templates/sources.list.ubuntu.tmpl' + +# configure udev rules + +# udev persistent net +cloud-init-per instance udev_persistent_net1 /etc/init.d/networking stop + +ADMIN_MAC={{ common.admin_mac }} +ADMIN_IF=$(echo {{ common.udevrules }} | sed 's/[,=]/\n/g' | grep "$ADMIN_MAC" | cut -d_ -f2 | head -1) +# Check if we do not already have static config (or interface seems unconfigured) +if [ ! 
-d "/etc/network/interfaces.d" ]; then + mkdir -p /etc/network/interfaces.d + echo 'source /etc/network/interfaces.d/*' > /etc/network/interfaces +fi +if [ ! -e "/etc/network/interfaces.d/ifcfg-$ADMIN_IF" ]; then + echo -e "auto $ADMIN_IF\niface $ADMIN_IF inet static\n\taddress {{ common.admin_ip }}\n\tnetmask {{ common.admin_mask }}\n\tgateway {{ common.gw }}" > /etc/network/interfaces.d/ifcfg-"$ADMIN_IF" +fi + +cloud-init-per instance udev_persistent_net5 /etc/init.d/networking start + +# end of udev + +#FIXME(agordeev): if operator updates dns settings on masternode after the node had been provisioned, +# cloud-init will start to generate resolv.conf with non-actual data +cloud-init-per instance resolv_conf_mkdir mkdir -p /etc/resolvconf/resolv.conf.d +cloud-init-per instance resolv_conf_remove rm -f /etc/resolv.conf +cloud-init-per instance resolv_conf_head_remove rm -f /etc/resolvconf/resolv.conf.d/head +cloud-init-per instance resolv_conf_header /bin/sh -c 'echo "# re-generated by cloud-init boothook only at the first boot;" | tee /etc/resolv.conf' +cloud-init-per instance resolv_conf_search /bin/sh -c 'echo "search {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf' +cloud-init-per instance resolv_conf_domain /bin/sh -c 'echo "domain {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf' +cloud-init-per instance resolv_conf_head_header /bin/sh -c 'echo "# re-generated by cloud-init boothook only at the first boot;" | tee /etc/resolvconf/resolv.conf.d/head' +cloud-init-per instance resolv_conf_head_search /bin/sh -c 'echo "search {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolvconf/resolv.conf.d/head' +cloud-init-per instance resolv_conf_head_domain /bin/sh -c 'echo "domain {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolvconf/resolv.conf.d/head' +cloud-init-per instance resolv_conf_nameserver /bin/sh -c 'echo nameserver {{ common.master_ip|replace('"','') }} | tee -a /etc/resolv.conf' 
+cloud-init-per instance resolv_conf_head_nameserver /bin/sh -c 'echo nameserver {{ common.master_ip|replace('"','') }} | tee -a /etc/resolvconf/resolv.conf.d/head' + +# configure black module lists +# virt-what should be installed +if [ ! -f /etc/modprobe.d/blacklist-i2c_piix4.conf ]; then + ([[ $(virt-what) = "virtualbox" ]] && echo "blacklist i2c_piix4" >> /etc/modprobe.d/blacklist-i2c_piix4.conf || :) && update-initramfs -u -k all + modprobe -r i2c_piix4 +fi + +cloud-init-per instance conntrack_ipv4 /bin/sh -c 'echo nf_conntrack_ipv4 | tee -a /etc/modules' +cloud-init-per instance conntrack_ipv6 /bin/sh -c 'echo nf_conntrack_ipv6 | tee -a /etc/modules' +cloud-init-per instance conntrack_proto_gre /bin/sh -c 'echo nf_conntrack_proto_gre | tee -a /etc/modules' +cloud-init-per instance conntrack_max /bin/sh -c 'echo "net.nf_conntrack_max=1048576" | tee -a /etc/sysctl.conf' +cloud-init-per instance kernel_panic /bin/sh -c 'echo "kernel.panic=60" | tee -a /etc/sysctl.conf' + +cloud-init-per instance conntrack_ipv4_load modprobe nf_conntrack_ipv4 +cloud-init-per instance conntrack_ipv6_load modprobe nf_conntrack_ipv6 +cloud-init-per instance conntrack_proto_gre_load modprobe nf_conntrack_proto_gre +cloud-init-per instance conntrack_max_set sysctl -w "net.nf_conntrack_max=1048576" +cloud-init-per instance kernel_panic_set sysctl -w "kernel.panic=60" + +cloud-init-per instance dhclient /bin/sh -c 'echo "supersede routers 0;" | tee /etc/dhcp/dhclient.conf' + +# ntp sync +# '| tee /dev/null' is needed for returning zero execution code always +cloud-init-per instance stop_ntp /bin/sh -c 'service ntp stop | tee /dev/null' +cloud-init-per instance sync_date ntpdate -t 4 -b {{ common.master_ip }} +cloud-init-per instance sync_hwclock hwclock --systohc + +cloud-init-per instance edit_ntp_conf1 sed -i '/^\s*tinker panic/ d' /etc/ntp.conf +cloud-init-per instance edit_ntp_conf2 sed -i '1 i tinker panic 0' /etc/ntp.conf +cloud-init-per instance edit_ntp_conf_mkdir mkdir -p 
/var/lib/ntp +cloud-init-per instance edit_ntp_conf3 /bin/sh -c 'echo 0 | tee /var/lib/ntp/drift' +cloud-init-per instance edit_ntp_conf4 sed -i '/^\s*server/ d' /etc/ntp.conf +cloud-init-per instance edit_ntp_conf5 /bin/sh -c 'echo "server {{ common.master_ip }} burst iburst" | tee -a /etc/ntp.conf' +cloud-init-per instance start_ntp service ntp start + +cloud-init-per instance removeUseDNS sed -i --follow-symlinks -e '/UseDNS/d' /etc/ssh/sshd_config +add_str_to_file_if_not_exists /etc/ssh/sshd_config 'UseDNS' 'UseDNS no' + +cloud-init-per instance gssapi_disable sed -i -e "/^\s*GSSAPICleanupCredentials yes/d" -e "/^\s*GSSAPIAuthentication yes/d" /etc/ssh/sshd_config + +cloud-init-per instance nailgun_agent_0 /bin/sh -c 'echo "rm -f /etc/nailgun-agent/nodiscover" | tee /etc/rc.local' +cloud-init-per instance nailgun_agent_1 /bin/sh -c 'echo "flock -w 0 -o /var/lock/agent.lock -c \"/opt/nailgun/bin/agent >> /var/log/nailgun-agent.log 2>&1\"" | tee -a /etc/rc.local' + +# Copying default bash settings to the root directory +cloud-init-per instance skel_bash cp -f /etc/skel/.bash* /root/ + +cloud-init-per instance hiera_puppet mkdir -p /etc/puppet /var/lib/hiera +cloud-init-per instance touch_puppet touch /var/lib/hiera/common.yaml /etc/puppet/hiera.yaml diff --git a/resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_centos.jinja2 b/resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_centos.jinja2 new file mode 100644 index 00000000..717a9cec --- /dev/null +++ b/resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_centos.jinja2 @@ -0,0 +1,104 @@ +#cloud-config +resize_rootfs: false +growpart: + mode: false +disable_ec2_metadata: true +disable_root: false + +# password: RANDOM +# chpasswd: { expire: True } + +ssh_pwauth: false +ssh_authorized_keys: +{% for key in common.ssh_auth_keys %} + - {{ key }} +{% endfor %} + +# set the locale to a given locale +# default: en_US.UTF-8 +locale: en_US.UTF-8 + 
+timezone: {{ common.timezone }} + +hostname: {{ common.hostname }} +fqdn: {{ common.fqdn }} + +# add entries to rsyslog configuration +rsyslog: + - filename: 10-log2master.conf + content: | + $template LogToMaster, "<%%PRI%>1 %$NOW%T%TIMESTAMP:8:$%Z %HOSTNAME% %APP-NAME% %PROCID% %MSGID% -%msg%\n" + *.* @{{ common.master_ip }};LogToMaster + +runcmd: +{% if puppet.enable != 1 %} + - service puppet stop + - chkconfig puppet off +{% endif %} +{% if mcollective.enable != 1 %} + - service mcollective stop + - chkconfig mcollective off +{% else %} + - chkconfig mcollective on + - service mcollective restart +{% endif %} + - iptables -t filter -F INPUT + - iptables -t filter -F FORWARD + - service iptables save + +# that module's missing in 0.6.3, but existent for >= 0.7.3 +write_files: + - content: | + --- + url: {{ common.master_url }} + path: /etc/nailgun-agent/config.yaml + - content: target + path: /etc/nailgun_systemtype + +mcollective: + conf: + main_collective: mcollective + collectives: mcollective + libdir: /usr/libexec/mcollective + logfile: /var/log/mcollective.log + loglevel: debug + daemonize: 1 + direct_addressing: 1 + ttl: 4294957 + securityprovider: psk + plugin.psk: {{ mcollective.pskey }} +{% if mcollective.connector == 'stomp' %} + connector = stomp + plugin.stomp.host: {{ mcollective.host }} + plugin.stomp.port: {{ mcollective.port|default(61613) }} + plugin.stomp.user: {{ mcollective.user }} + plugin.stomp.password: {{ mcollective.password }} +{% else %} + connector: rabbitmq + plugin.rabbitmq.vhost: {{ mcollective.vhost }} + plugin.rabbitmq.pool.size: 1 + plugin.rabbitmq.pool.1.host: {{ mcollective.host }} + plugin.rabbitmq.pool.1.port: {{ mcollective.port|default(61613) }} + plugin.rabbitmq.pool.1.user: {{ mcollective.user }} + plugin.rabbitmq.pool.1.password: {{ mcollective.password }} + plugin.rabbitmq.heartbeat_interval: 30 +{% endif %} + factsource: yaml + plugin.yaml: /etc/mcollective/facts.yaml + +puppet: + conf: + main: + logdir: 
/var/log/puppet + rundir: /var/run/puppet + ssldir: $vardir/ssl + pluginsync: true + agent: + classfile: $vardir/classes.txt + localconfig: $vardir/localconfig + server: {{ puppet.master }} + report: false + configtimeout: 600 + + +final_message: "YAY! The system is finally up, after $UPTIME seconds" diff --git a/resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_ubuntu.jinja2 b/resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_ubuntu.jinja2 new file mode 100644 index 00000000..61e67583 --- /dev/null +++ b/resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_ubuntu.jinja2 @@ -0,0 +1,103 @@ +#cloud-config +resize_rootfs: false +growpart: + mode: false +disable_ec2_metadata: true +disable_root: false +user: root +password: r00tme +chpasswd: { expire: false } +ssh_pwauth: false +ssh_authorized_keys: +{% for key in common.ssh_auth_keys %} + - {{ key }} +{% endfor %} + +# set the locale to a given locale +# default: en_US.UTF-8 +locale: en_US.UTF-8 + +timezone: {{ common.timezone }} + +hostname: {{ common.hostname }} +fqdn: {{ common.fqdn }} + + +# add entries to rsyslog configuration +rsyslog: + - filename: 10-log2master.conf + content: | + $template LogToMaster, "<%%PRI%>1 %$NOW%T%TIMESTAMP:8:$%Z %HOSTNAME% %APP-NAME% %PROCID% %MSGID% -%msg%\n" + *.* @{{ common.master_ip }};LogToMaster + + +# that module's missing in 0.6.3, but existent for >= 0.7.3 +write_files: + - content: | + --- + url: {{ common.master_url }} + path: /etc/nailgun-agent/config.yaml + - content: target + path: /etc/nailgun_systemtype + +mcollective: + conf: + main_collective: mcollective + collectives: mcollective + libdir: /usr/share/mcollective/plugins + logfile: /var/log/mcollective.log + loglevel: debug + daemonize: 0 + direct_addressing: 1 + ttl: 4294957 + securityprovider: psk + plugin.psk: {{ mcollective.pskey }} +{% if mcollective.connector == 'stomp' %} + connector = stomp + plugin.stomp.host: {{ mcollective.host }} + 
plugin.stomp.port: {{ mcollective.port|default(61613) }} + plugin.stomp.user: {{ mcollective.user }} + plugin.stomp.password: {{ mcollective.password }} +{% else %} + connector: rabbitmq + plugin.rabbitmq.vhost: {{ mcollective.vhost }} + plugin.rabbitmq.pool.size: 1 + plugin.rabbitmq.pool.1.host: {{ mcollective.host }} + plugin.rabbitmq.pool.1.port: {{ mcollective.port|default(61613) }} + plugin.rabbitmq.pool.1.user: {{ mcollective.user }} + plugin.rabbitmq.pool.1.password: {{ mcollective.password }} + plugin.rabbitmq.heartbeat_interval: 30 +{% endif %} + factsource: yaml + plugin.yaml: /etc/mcollective/facts.yaml + +puppet: + conf: + main: + logdir: /var/log/puppet + rundir: /var/run/puppet + ssldir: $vardir/ssl + pluginsync: true + agent: + classfile: $vardir/classes.txt + localconfig: $vardir/localconfig + server: {{ puppet.master }} + report: false + configtimeout: 600 + +runcmd: +{% if puppet.enable != 1 %} + - /usr/sbin/invoke-rc.d puppet stop + - /usr/sbin/update-rc.d -f puppet remove +{% endif %} +{% if mcollective.enable != 1 %} + - /usr/sbin/invoke-rc.d mcollective stop + - echo manual > /etc/init/mcollective.override +{% else %} + - rm -f /etc/init/mcollective.override + - service mcollective restart +{% endif %} + - iptables -t filter -F INPUT + - iptables -t filter -F FORWARD + +final_message: "YAY! 
The system is finally up, after $UPTIME seconds" diff --git a/resources/not_provisioned_node/templates/cloud-init-templates/meta-data_centos.jinja2 b/resources/not_provisioned_node/templates/cloud-init-templates/meta-data_centos.jinja2 new file mode 100644 index 00000000..f63a89bd --- /dev/null +++ b/resources/not_provisioned_node/templates/cloud-init-templates/meta-data_centos.jinja2 @@ -0,0 +1,11 @@ +# instance-id will be autogenerated +# instance-id: iid-abcdefg +#network-interfaces: | +# auto {{ common.admin_iface_name|default("eth0") }} +# iface {{ common.admin_iface_name|default("eth0") }} inet static +# address {{ common.admin_ip }} +# # network 192.168.1.0 +# netmask {{ common.admin_mask }} +# # broadcast 192.168.1.255 +# # gateway 192.168.1.254 +hostname: {{ common.hostname }} diff --git a/resources/not_provisioned_node/templates/cloud-init-templates/meta-data_ubuntu.jinja2 b/resources/not_provisioned_node/templates/cloud-init-templates/meta-data_ubuntu.jinja2 new file mode 100644 index 00000000..f63a89bd --- /dev/null +++ b/resources/not_provisioned_node/templates/cloud-init-templates/meta-data_ubuntu.jinja2 @@ -0,0 +1,11 @@ +# instance-id will be autogenerated +# instance-id: iid-abcdefg +#network-interfaces: | +# auto {{ common.admin_iface_name|default("eth0") }} +# iface {{ common.admin_iface_name|default("eth0") }} inet static +# address {{ common.admin_ip }} +# # network 192.168.1.0 +# netmask {{ common.admin_mask }} +# # broadcast 192.168.1.255 +# # gateway 192.168.1.254 +hostname: {{ common.hostname }} diff --git a/resources/not_provisioned_node/templates/provisioning.json b/resources/not_provisioned_node/templates/provisioning.json new file mode 100644 index 00000000..6b077189 --- /dev/null +++ b/resources/not_provisioned_node/templates/provisioning.json @@ -0,0 +1,220 @@ +{ + "profile": "ubuntu_1404_x86_64", + "name_servers_search": "\"example.com\"", + "uid": "2", + "interfaces": { + "eth1": { + "static": "0", + "mac_address": 
"08:00:27:6e:6d:b4" + }, + "eth0": { + "ip_address": "10.0.2.15", + "dns_name": "node-8.test.domain.local", + "netmask": "255.255.255.0", + "static": "0", + "mac_address": "08:00:27:ea:35:e7" + } + }, + "interfaces_extra": { + "eth1": { + "onboot": "no", + "peerdns": "no" + }, + "eth0": { + "onboot": "no", + "peerdns": "no" + } + }, + "power_type": "ssh", + "power_user": "root", + "kernel_options": { + "udevrules": "08:00:27:6e:6d:b4_eth1,08:00:27:ea:35:e7_eth0", + "netcfg/choose_interface": "08:00:27:ea:35:e7" + }, + "power_address": "10.20.0.1", + "name_servers": "\"127.0.0.1\"", + "ks_meta": { + "gw": "10.20.0.1", + "mco_enable": 1, + "mco_vhost": "mcollective", + "repo_setup": { + "installer_kernel": { + "local": "/var/www/nailgun/ubuntu/x86_64/images/linux", + "remote_relative": "dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/linux" + }, + "repos": [ + { + "name": "ubuntu", + "section": "main universe multiverse", + "uri": "http://archive.ubuntu.com/ubuntu/", + "priority": null, + "suite": "trusty", + "type": "deb" + }, + { + "name": "ubuntu-updates", + "section": "main universe multiverse", + "uri": "http://archive.ubuntu.com/ubuntu/", + "priority": null, + "suite": "trusty-updates", + "type": "deb" + }, + { + "name": "ubuntu-security", + "section": "main universe multiverse", + "uri": "http://archive.ubuntu.com/ubuntu/", + "priority": null, + "suite": "trusty-security", + "type": "deb" + }, + { + "name": "mos", + "section": "main restricted", + "uri": "http://127.0.0.1:8080/2015.1.0-7.0/ubuntu/x86_64", + "priority": 1050, + "suite": "mos7.0", + "type": "deb" + }, + { + "name": "mos-updates", + "section": "main restricted", + "uri": "http://mirror.fuel-infra.org/mos/ubuntu/", + "priority": 1050, + "suite": "mos7.0-updates", + "type": "deb" + }, + { + "name": "mos-security", + "section": "main restricted", + "uri": "http://mirror.fuel-infra.org/mos/ubuntu/", + "priority": 1050, + "suite": "mos7.0-security", + "type": "deb" + }, 
+ { + "name": "mos-holdback", + "section": "main restricted", + "uri": "http://mirror.fuel-infra.org/mos/ubuntu/", + "priority": 1100, + "suite": "mos7.0-holdback", + "type": "deb" + }, + { + "name": "Auxiliary", + "section": "main restricted", + "uri": "http://127.0.0.1:8080/2015.1.0-7.0/ubuntu/auxiliary", + "priority": 1150, + "suite": "auxiliary", + "type": "deb" + } + ], + "metadata": { + "always_editable": true, + "weight": 50, + "label": "Repositories" + }, + "installer_initrd": { + "local": "/var/www/nailgun/ubuntu/x86_64/images/initrd.gz", + "remote_relative": "dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/initrd.gz" + } + }, + "authorized_keys": [], + "mlnx_iser_enabled": false, + "mco_pskey": "Gie6iega9ohngaenahthohngu8aebohxah9seidi", + "mco_user": "guest", + "puppet_enable": 0, + "fuel_version": "6.1", + "install_log_2_syslog": 1, + "image_data": { + "/boot": { + "container": "gzip", + "uri": "http://10.0.0.2:8001/tmp/targetimages/env_3_ubuntu_1404_amd64-boot.img.gz", + "format": "ext2" + }, + "/": { + "container": "gzip", + "uri": "http://10.0.0.2:8001/tmp/targetimages/env_3_ubuntu_1404_amd64.img.gz", + "format": "ext4" + } + }, + "timezone": "Etc/UTC", + "puppet_auto_setup": 1, + "puppet_master": "localhost", + "mco_auto_setup": 1, + "mco_password": "guest", + "auth_key": "\"\"", + "pm_data": { + "kernel_params": "console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90 nomodeset", + "ks_spaces": [ + { + "name": "sda", + "extra": [], + "free_space": 304617, + "volumes": [ + { + "type": "boot", + "size": 300 + }, + { + "mount": "/boot", + "type": "raid", + "file_system": "ext2", + "name": "Boot", + "size": 200 + }, + { + "type": "lvm_meta_pool", + "size": 0 + }, + { + "vg": "os", + "type": "pv", + "lvm_meta_size": 64, + "size": 20000, + "orig_size": 59456 + } + ], + "type": "disk", + "id": "sda", + "size": 42800, + "orig_size": 305245 + }, + { + "_allocate_size": "min", + "label": "Base System", + 
"min_size": 19936, + "orig_min_size": 59392, + "volumes": [ + { + "mount": "/", + "size": 11744, + "type": "lv", + "name": "root", + "file_system": "ext4" + }, + { + "mount": "swap", + "size": 8192, + "type": "lv", + "name": "swap", + "file_system": "swap" + } + ], + "type": "vg", + "id": "os" + } + ] + }, + "mlnx_plugin_mode": "disabled", + "master_ip": "127.0.0.1", + "mco_connector": "rabbitmq", + "mlnx_vf_num": "16", + "admin_net": "10.20.0.0/24", + "mco_host": "localhost" + }, + "name": "node-2", + "hostname": "node-2.example.com", + "slave_name": "node-2", + "power_pass": "/root/.ssh/bootstrap.rsa", + "netboot_enabled": "1" +} diff --git a/templates/not_provisioned_nodes.yaml b/templates/not_provisioned_nodes.yaml new file mode 100644 index 00000000..ff459ca0 --- /dev/null +++ b/templates/not_provisioned_nodes.yaml @@ -0,0 +1,42 @@ +id: not_provisioned_nodes +resources: +{% for node in nodes %} + - id: ssh_transport{{ node.mac | replace(':', '_') }} + from: resources/transport_ssh + values: + ssh_user: 'root' + ssh_key: '/vagrant/tmp/keys/ssh_private' + - id: transports{{node.mac | replace(':', '_') }} + from: resources/transports + values: + transports:key: ssh_transport{{node.mac | replace(':', '_') }}::ssh_key + transports:user: ssh_transport{{node.mac | replace(':', '_') }}::ssh_user + transports:port: ssh_transport{{node.mac | replace(':', '_') }}::ssh_port + transports:name: ssh_transport{{node.mac | replace(':', '_') }}::name + - id: node{{node.mac | replace(':', '_') }} + from: resources/not_provisioned_node + values: + ip: {{node.ip}} + transports_id: transports{{node.mac | replace(':', '_') }}::transports_id + name: node{{node.mac | replace(':', '_') }} + admin_mac: {{node.mac}} +{% endfor %} + + - id: ssh_transport_master + from: resources/transport_ssh + values: + ssh_user: 'vagrant' + ssh_key: '/vagrant/.vagrant/machines/solar-dev/virtualbox/private_key' + - id: transports_master + from: resources/transports + values: + transports:key: 
ssh_transport_master::ssh_key + transports:user: ssh_transport_master::ssh_user + transports:port: ssh_transport_master::ssh_port + transports:name: ssh_transport_master::name + - id: node_master + from: resources/ro_node + values: + name: node_master + ip: '10.0.2.15' + transports_id: transports_master::transports_id From 9163734304089c039d71eb2657925736d147eaef Mon Sep 17 00:00:00 2001 From: Evgeniy L Date: Mon, 19 Oct 2015 20:27:22 +0300 Subject: [PATCH 05/15] Remove Fuel specific configuration from cloud-init configs --- .../boothook_centos.jinja2 | 54 ------------- .../boothook_ubuntu.jinja2 | 41 ---------- .../cloud_config_centos.jinja2 | 78 ------------------- .../cloud_config_ubuntu.jinja2 | 78 ------------------- 4 files changed, 251 deletions(-) diff --git a/resources/not_provisioned_node/templates/cloud-init-templates/boothook_centos.jinja2 b/resources/not_provisioned_node/templates/cloud-init-templates/boothook_centos.jinja2 index 732bf752..1be4a587 100644 --- a/resources/not_provisioned_node/templates/cloud-init-templates/boothook_centos.jinja2 +++ b/resources/not_provisioned_node/templates/cloud-init-templates/boothook_centos.jinja2 @@ -1,21 +1,10 @@ #cloud-boothook #!/bin/bash -function add_str_to_file_if_not_exists { - file=$1 - str=$2 - val=$3 - if ! 
grep -q "^ *${str}" $file; then - echo $val >> $file - fi -} - - cloud-init-per instance disable_selinux_on_the_fly setenforce 0 cloud-init-per instance disable_selinux sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/sysconfig/selinux - # configure udev rules # udev persistent net @@ -64,46 +53,3 @@ cloud-init-per instance set_coredump /bin/sh -c 'echo -e "kernel.core_pattern=/v cloud-init-per instance set_coredump_sysctl sysctl -w "kernel.core_pattern=/var/log/coredump/core.%e.%p.%h.%t" cloud-init-per instance set_chmod chmod 777 /var/log/coredump cloud-init-per instance set_limits /bin/sh -c 'echo -e "* soft core unlimited\n* hard core unlimited" | tee -a /etc/security/limits.conf' - - -#NOTE: disabled for centos? -#cloud-init-per instance dhclient echo 'supersede routers 0;' | tee /etc/dhcp/dhclient.conf - -# ntp sync -# '| tee /dev/null' is needed for returning zero execution code always -cloud-init-per instance stop_ntpd /bin/sh -c 'service ntpd stop | tee /dev/null' -cloud-init-per instance sync_date ntpdate -t 4 -b {{ common.master_ip }} -cloud-init-per instance sync_hwclock hwclock --systohc - -cloud-init-per instance edit_ntp_conf1 sed -i '/^\s*tinker panic/ d' /etc/ntp.conf -cloud-init-per instance edit_ntp_conf2 sed -i '1 i tinker panic 0' /etc/ntp.conf -cloud-init-per instance edit_ntp_conf_mkdir mkdir -p /var/lib/ntp -cloud-init-per instance edit_ntp_conf3 /bin/sh -c 'echo 0 | tee /var/lib/ntp/drift' -cloud-init-per instance edit_ntp_conf4 chown ntp: /var/lib/ntp/drift -cloud-init-per instance edit_ntp_conf5 sed -i '/^\s*server/ d' /etc/ntp.conf -cloud-init-per instance edit_ntp_conf6 /bin/sh -c 'echo "server {{ common.master_ip }} burst iburst" | tee -a /etc/ntp.conf' - - -# Point installed ntpd to Master node -cloud-init-per instance set_ntpdate sed -i 's/SYNC_HWCLOCK\s*=\s*no/SYNC_HWCLOCK=yes/' /etc/sysconfig/ntpdate -cloud-init-per instance set_ntpd_0 chkconfig ntpd on -cloud-init-per instance set_ntpd_1 chkconfig ntpdate on -cloud-init-per instance 
start_ntpd service ntpd start - -cloud-init-per instance removeUseDNS sed -i --follow-symlinks -e '/UseDNS/d' /etc/ssh/sshd_config -add_str_to_file_if_not_exists /etc/ssh/sshd_config 'UseDNS' 'UseDNS no' - -cloud-init-per instance gssapi_disable sed -i -e "/^\s*GSSAPICleanupCredentials yes/d" -e "/^\s*GSSAPIAuthentication yes/d" /etc/ssh/sshd_config - -cloud-init-per instance nailgun_agent_0 /bin/sh -c 'echo "rm -f /etc/nailgun-agent/nodiscover" | tee /etc/rc.local' -cloud-init-per instance nailgun_agent_1 /bin/sh -c 'echo "flock -w 0 -o /var/lock/agent.lock -c \"/opt/nailgun/bin/agent >> /var/log/nailgun-agent.log 2>&1\"" | tee -a /etc/rc.local' - -# Copying default bash settings to the root directory -cloud-init-per instance skel_bash cp -f /etc/skel/.bash* /root/ - -# Puppet config -cloud-init-per instance hiera_puppet mkdir -p /etc/puppet /var/lib/hiera -cloud-init-per instance touch_puppet touch /var/lib/hiera/common.yaml /etc/puppet/hiera.yaml - -# Mcollective enable -cloud-init-per instance mcollective_enable sed -i /etc/rc.d/init.d/mcollective -e 's/\(# chkconfig:\s\+[-0-6]\+\) [0-9]\+ \([0-9]\+\)/\1 81 \2/' diff --git a/resources/not_provisioned_node/templates/cloud-init-templates/boothook_ubuntu.jinja2 b/resources/not_provisioned_node/templates/cloud-init-templates/boothook_ubuntu.jinja2 index e3c7dd91..753ef758 100644 --- a/resources/not_provisioned_node/templates/cloud-init-templates/boothook_ubuntu.jinja2 +++ b/resources/not_provisioned_node/templates/cloud-init-templates/boothook_ubuntu.jinja2 @@ -1,19 +1,6 @@ #cloud-boothook #!/bin/bash -function add_str_to_file_if_not_exists { - file=$1 - str=$2 - val=$3 - if ! 
grep -q "^ *${str}" $file; then - echo $val >> $file - fi -} - -cloud-init-per instance wipe_sources_list_templates /bin/sh -c 'echo | tee /etc/cloud/templates/sources.list.ubuntu.tmpl' - -# configure udev rules - # udev persistent net cloud-init-per instance udev_persistent_net1 /etc/init.d/networking stop @@ -66,31 +53,3 @@ cloud-init-per instance conntrack_max_set sysctl -w "net.nf_conntrack_max=104857 cloud-init-per instance kernel_panic_set sysctl -w "kernel.panic=60" cloud-init-per instance dhclient /bin/sh -c 'echo "supersede routers 0;" | tee /etc/dhcp/dhclient.conf' - -# ntp sync -# '| tee /dev/null' is needed for returning zero execution code always -cloud-init-per instance stop_ntp /bin/sh -c 'service ntp stop | tee /dev/null' -cloud-init-per instance sync_date ntpdate -t 4 -b {{ common.master_ip }} -cloud-init-per instance sync_hwclock hwclock --systohc - -cloud-init-per instance edit_ntp_conf1 sed -i '/^\s*tinker panic/ d' /etc/ntp.conf -cloud-init-per instance edit_ntp_conf2 sed -i '1 i tinker panic 0' /etc/ntp.conf -cloud-init-per instance edit_ntp_conf_mkdir mkdir -p /var/lib/ntp -cloud-init-per instance edit_ntp_conf3 /bin/sh -c 'echo 0 | tee /var/lib/ntp/drift' -cloud-init-per instance edit_ntp_conf4 sed -i '/^\s*server/ d' /etc/ntp.conf -cloud-init-per instance edit_ntp_conf5 /bin/sh -c 'echo "server {{ common.master_ip }} burst iburst" | tee -a /etc/ntp.conf' -cloud-init-per instance start_ntp service ntp start - -cloud-init-per instance removeUseDNS sed -i --follow-symlinks -e '/UseDNS/d' /etc/ssh/sshd_config -add_str_to_file_if_not_exists /etc/ssh/sshd_config 'UseDNS' 'UseDNS no' - -cloud-init-per instance gssapi_disable sed -i -e "/^\s*GSSAPICleanupCredentials yes/d" -e "/^\s*GSSAPIAuthentication yes/d" /etc/ssh/sshd_config - -cloud-init-per instance nailgun_agent_0 /bin/sh -c 'echo "rm -f /etc/nailgun-agent/nodiscover" | tee /etc/rc.local' -cloud-init-per instance nailgun_agent_1 /bin/sh -c 'echo "flock -w 0 -o /var/lock/agent.lock -c 
\"/opt/nailgun/bin/agent >> /var/log/nailgun-agent.log 2>&1\"" | tee -a /etc/rc.local' - -# Copying default bash settings to the root directory -cloud-init-per instance skel_bash cp -f /etc/skel/.bash* /root/ - -cloud-init-per instance hiera_puppet mkdir -p /etc/puppet /var/lib/hiera -cloud-init-per instance touch_puppet touch /var/lib/hiera/common.yaml /etc/puppet/hiera.yaml diff --git a/resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_centos.jinja2 b/resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_centos.jinja2 index 717a9cec..a29701b5 100644 --- a/resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_centos.jinja2 +++ b/resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_centos.jinja2 @@ -23,82 +23,4 @@ timezone: {{ common.timezone }} hostname: {{ common.hostname }} fqdn: {{ common.fqdn }} -# add entries to rsyslog configuration -rsyslog: - - filename: 10-log2master.conf - content: | - $template LogToMaster, "<%%PRI%>1 %$NOW%T%TIMESTAMP:8:$%Z %HOSTNAME% %APP-NAME% %PROCID% %MSGID% -%msg%\n" - *.* @{{ common.master_ip }};LogToMaster - -runcmd: -{% if puppet.enable != 1 %} - - service puppet stop - - chkconfig puppet off -{% endif %} -{% if mcollective.enable != 1 %} - - service mcollective stop - - chkconfig mcollective off -{% else %} - - chkconfig mcollective on - - service mcollective restart -{% endif %} - - iptables -t filter -F INPUT - - iptables -t filter -F FORWARD - - service iptables save - -# that module's missing in 0.6.3, but existent for >= 0.7.3 -write_files: - - content: | - --- - url: {{ common.master_url }} - path: /etc/nailgun-agent/config.yaml - - content: target - path: /etc/nailgun_systemtype - -mcollective: - conf: - main_collective: mcollective - collectives: mcollective - libdir: /usr/libexec/mcollective - logfile: /var/log/mcollective.log - loglevel: debug - daemonize: 1 - direct_addressing: 1 - ttl: 4294957 - securityprovider: psk - 
plugin.psk: {{ mcollective.pskey }} -{% if mcollective.connector == 'stomp' %} - connector = stomp - plugin.stomp.host: {{ mcollective.host }} - plugin.stomp.port: {{ mcollective.port|default(61613) }} - plugin.stomp.user: {{ mcollective.user }} - plugin.stomp.password: {{ mcollective.password }} -{% else %} - connector: rabbitmq - plugin.rabbitmq.vhost: {{ mcollective.vhost }} - plugin.rabbitmq.pool.size: 1 - plugin.rabbitmq.pool.1.host: {{ mcollective.host }} - plugin.rabbitmq.pool.1.port: {{ mcollective.port|default(61613) }} - plugin.rabbitmq.pool.1.user: {{ mcollective.user }} - plugin.rabbitmq.pool.1.password: {{ mcollective.password }} - plugin.rabbitmq.heartbeat_interval: 30 -{% endif %} - factsource: yaml - plugin.yaml: /etc/mcollective/facts.yaml - -puppet: - conf: - main: - logdir: /var/log/puppet - rundir: /var/run/puppet - ssldir: $vardir/ssl - pluginsync: true - agent: - classfile: $vardir/classes.txt - localconfig: $vardir/localconfig - server: {{ puppet.master }} - report: false - configtimeout: 600 - - final_message: "YAY! 
The system is finally up, after $UPTIME seconds" diff --git a/resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_ubuntu.jinja2 b/resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_ubuntu.jinja2 index 61e67583..94e119fe 100644 --- a/resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_ubuntu.jinja2 +++ b/resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_ubuntu.jinja2 @@ -22,82 +22,4 @@ timezone: {{ common.timezone }} hostname: {{ common.hostname }} fqdn: {{ common.fqdn }} - -# add entries to rsyslog configuration -rsyslog: - - filename: 10-log2master.conf - content: | - $template LogToMaster, "<%%PRI%>1 %$NOW%T%TIMESTAMP:8:$%Z %HOSTNAME% %APP-NAME% %PROCID% %MSGID% -%msg%\n" - *.* @{{ common.master_ip }};LogToMaster - - -# that module's missing in 0.6.3, but existent for >= 0.7.3 -write_files: - - content: | - --- - url: {{ common.master_url }} - path: /etc/nailgun-agent/config.yaml - - content: target - path: /etc/nailgun_systemtype - -mcollective: - conf: - main_collective: mcollective - collectives: mcollective - libdir: /usr/share/mcollective/plugins - logfile: /var/log/mcollective.log - loglevel: debug - daemonize: 0 - direct_addressing: 1 - ttl: 4294957 - securityprovider: psk - plugin.psk: {{ mcollective.pskey }} -{% if mcollective.connector == 'stomp' %} - connector = stomp - plugin.stomp.host: {{ mcollective.host }} - plugin.stomp.port: {{ mcollective.port|default(61613) }} - plugin.stomp.user: {{ mcollective.user }} - plugin.stomp.password: {{ mcollective.password }} -{% else %} - connector: rabbitmq - plugin.rabbitmq.vhost: {{ mcollective.vhost }} - plugin.rabbitmq.pool.size: 1 - plugin.rabbitmq.pool.1.host: {{ mcollective.host }} - plugin.rabbitmq.pool.1.port: {{ mcollective.port|default(61613) }} - plugin.rabbitmq.pool.1.user: {{ mcollective.user }} - plugin.rabbitmq.pool.1.password: {{ mcollective.password }} - plugin.rabbitmq.heartbeat_interval: 30 -{% endif 
%} - factsource: yaml - plugin.yaml: /etc/mcollective/facts.yaml - -puppet: - conf: - main: - logdir: /var/log/puppet - rundir: /var/run/puppet - ssldir: $vardir/ssl - pluginsync: true - agent: - classfile: $vardir/classes.txt - localconfig: $vardir/localconfig - server: {{ puppet.master }} - report: false - configtimeout: 600 - -runcmd: -{% if puppet.enable != 1 %} - - /usr/sbin/invoke-rc.d puppet stop - - /usr/sbin/update-rc.d -f puppet remove -{% endif %} -{% if mcollective.enable != 1 %} - - /usr/sbin/invoke-rc.d mcollective stop - - echo manual > /etc/init/mcollective.override -{% else %} - - rm -f /etc/init/mcollective.override - - service mcollective restart -{% endif %} - - iptables -t filter -F INPUT - - iptables -t filter -F FORWARD - final_message: "YAY! The system is finally up, after $UPTIME seconds" From 4c434ec247bed39ea2ca4b906f5eef07f28b5df1 Mon Sep 17 00:00:00 2001 From: Evgeniy L Date: Mon, 19 Oct 2015 20:52:53 +0300 Subject: [PATCH 06/15] Install discovery service --- bootstrap/playbooks/pxe.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bootstrap/playbooks/pxe.yaml b/bootstrap/playbooks/pxe.yaml index 4f9e13eb..4d1f8a4e 100644 --- a/bootstrap/playbooks/pxe.yaml +++ b/bootstrap/playbooks/pxe.yaml @@ -50,3 +50,7 @@ # Configure http server in order serve file in '/vagrant' directory - template: src=files/nginx_vagrant_dir.cfg dest=/etc/nginx/conf.d/vagrant_dir.conf - service: name=nginx state=restarted + + # Install discovery service + - shell: pip install git+https://github.com/rustyrobot/discovery.git + - shell: 'discovery &' From 48fcd503b1c88a51c3a5883725825982fdbd4384 Mon Sep 17 00:00:00 2001 From: Evgeniy L Date: Mon, 19 Oct 2015 20:58:50 +0300 Subject: [PATCH 07/15] Refactor not_provisioned_nodes template --- examples/provisioning/provision.py | 2 +- templates/not_provisioned_nodes.yaml | 19 ++++++++++--------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/examples/provisioning/provision.py 
b/examples/provisioning/provision.py index 9f83d5d3..c063b3ef 100755 --- a/examples/provisioning/provision.py +++ b/examples/provisioning/provision.py @@ -26,7 +26,7 @@ master_node = filter(lambda n: n.name == 'node_master', node_resources)[0] # Dnsmasq resources for node in nodes_list: dnsmasq = vr.create('dnsmasq_{0}'.format(node['mac'].replace(':', '_')), 'resources/dnsmasq', {})[0] - node = filter(lambda n: n.name.endswith('node{0}'.format(node['mac']).replace(':', '_')), node_resources)[0] + node = filter(lambda n: n.name.endswith('node_{0}'.format(node['mac']).replace(':', '_')), node_resources)[0] master_node.connect(dnsmasq) node.connect(dnsmasq, {'admin_mac': 'exclude_mac_pxe'}) diff --git a/templates/not_provisioned_nodes.yaml b/templates/not_provisioned_nodes.yaml index ff459ca0..894d4b5d 100644 --- a/templates/not_provisioned_nodes.yaml +++ b/templates/not_provisioned_nodes.yaml @@ -1,24 +1,25 @@ id: not_provisioned_nodes resources: {% for node in nodes %} - - id: ssh_transport{{ node.mac | replace(':', '_') }} + {% set mac = node.mac | replace(':', '_') %} + - id: ssh_transport{{ mac }} from: resources/transport_ssh values: ssh_user: 'root' ssh_key: '/vagrant/tmp/keys/ssh_private' - - id: transports{{node.mac | replace(':', '_') }} + - id: transports{{mac}} from: resources/transports values: - transports:key: ssh_transport{{node.mac | replace(':', '_') }}::ssh_key - transports:user: ssh_transport{{node.mac | replace(':', '_') }}::ssh_user - transports:port: ssh_transport{{node.mac | replace(':', '_') }}::ssh_port - transports:name: ssh_transport{{node.mac | replace(':', '_') }}::name - - id: node{{node.mac | replace(':', '_') }} + transports:key: ssh_transport{{mac}}::ssh_key + transports:user: ssh_transport{{mac}}::ssh_user + transports:port: ssh_transport{{mac}}::ssh_port + transports:name: ssh_transport{{mac}}::name + - id: node_{{mac}} from: resources/not_provisioned_node values: ip: {{node.ip}} - transports_id: transports{{node.mac | replace(':', 
'_') }}::transports_id - name: node{{node.mac | replace(':', '_') }} + transports_id: transports{{mac}}::transports_id + name: node_{{mac}} admin_mac: {{node.mac}} {% endfor %} From 5594be3ddd49e5c090af64312083ee9f0eb14cfb Mon Sep 17 00:00:00 2001 From: Evgeniy L Date: Tue, 20 Oct 2015 14:35:38 +0300 Subject: [PATCH 08/15] Implement parallel node provisioning --- examples/provisioning/provision.py | 4 +--- examples/provisioning/provision.sh | 5 ++++- resources/dnsmasq/actions/exclude_mac_pxe.yaml | 11 +++++++++++ resources/not_provisioned_node/actions/run.sh | 2 ++ 4 files changed, 18 insertions(+), 4 deletions(-) diff --git a/examples/provisioning/provision.py b/examples/provisioning/provision.py index c063b3ef..5b58830e 100755 --- a/examples/provisioning/provision.py +++ b/examples/provisioning/provision.py @@ -13,9 +13,7 @@ from solar.events.api import add_event discovery_service = 'http://0.0.0.0:8881' -# DEBUG use a single node -nodes_list = [requests.get(discovery_service).json()[0]] - +nodes_list = requests.get(discovery_service).json() # Create slave node resources node_resources = vr.create('nodes', 'templates/not_provisioned_nodes.yaml', {'nodes': nodes_list}) diff --git a/examples/provisioning/provision.sh b/examples/provisioning/provision.sh index c7265890..d923fbef 100755 --- a/examples/provisioning/provision.sh +++ b/examples/provisioning/provision.sh @@ -4,6 +4,10 @@ set -eux DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +# Remove generated pxe exclude files +sudo rm -f /etc/dnsmasq.d/no_pxe_*.conf +sudo service dnsmasq restart + solar resource clear_all python "${DIR}"/provision.py @@ -11,4 +15,3 @@ solar changes stage solar changes process solar orch run-once last watch --color -n1 'solar orch report last' - diff --git a/resources/dnsmasq/actions/exclude_mac_pxe.yaml b/resources/dnsmasq/actions/exclude_mac_pxe.yaml index 91b16416..65c3d1be 100644 --- a/resources/dnsmasq/actions/exclude_mac_pxe.yaml +++ 
b/resources/dnsmasq/actions/exclude_mac_pxe.yaml @@ -3,4 +3,15 @@ tasks: - lineinfile: create=yes dest=/etc/dnsmasq.d/no_pxe_{{exclude_mac_pxe | replace(':', '_')}}.conf line="dhcp-host={{exclude_mac_pxe}},set:nopxe" + # FIXME: currently there is no way to specify + # policy not to run several tasks in parallel, + # so when we deploy several nodes in parallel + # it causes the problems when two tasks try + # to restart supervisor at the same time, and + # fail to do it. + - command: service dnsmasq status + register: log + until: log.stdout.find('running') > -1 + retries: 5 + delay: 2 - shell: service dnsmasq restart diff --git a/resources/not_provisioned_node/actions/run.sh b/resources/not_provisioned_node/actions/run.sh index 1ea0ed55..5a937ebb 100644 --- a/resources/not_provisioned_node/actions/run.sh +++ b/resources/not_provisioned_node/actions/run.sh @@ -4,3 +4,5 @@ set -eux # Fake run action which is required in order to make # dependency `run` -> `provision` + +exit 0 From ab4bba73ce32bd27d92cf4822dad5f6d69c6b23e Mon Sep 17 00:00:00 2001 From: Evgeniy L Date: Tue, 20 Oct 2015 18:10:39 +0300 Subject: [PATCH 09/15] Update readme with instruction, how to run provisioning --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index b3061490..77342057 100644 --- a/README.md +++ b/README.md @@ -296,3 +296,14 @@ Full documentation of individual functions is found in the `solar/template.py` f # Customizing vagrant-settings.yaml Solar is shipped with sane defaults in `vagrant-setting.yaml_defaults`. If you need to adjust them for your needs, e.g. changing resource allocation for VirtualBox machines, you should just compy the file to `vagrant-setting.yaml` and make your modifications. + +# Image based provisioning with Solar + +* In `vagrant-setting.yaml_defaults` file uncomment `preprovisioned: false` line. +* Run `vagrant up`, it will take some time because it builds image for bootstrap. 
+* Currently in order to perform provisioning, pre-built images from Fuel can be used + * Download images [using this link](https://drive.google.com/file/d/0B7I3b5vI7ZYXM0FPTDJEdjg0Qnc/view). + * Login into vm `vagrant ssh solar-dev` + * Go to `cd /vagrant/tmp/` directory + * Untar the images `tar vxf targetimages.tar` +* Now you can run provisioning `/vagrant/examples/provisioning/provision.sh` From 78ec365235b5db619cac0104a8e7b52bf9c9c608 Mon Sep 17 00:00:00 2001 From: Evgeniy L Date: Tue, 20 Oct 2015 18:14:45 +0300 Subject: [PATCH 10/15] Specify vagrant-settings.yaml file in readme to enable provisioning --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 77342057..105ba695 100644 --- a/README.md +++ b/README.md @@ -299,7 +299,7 @@ Solar is shipped with sane defaults in `vagrant-setting.yaml_defaults`. If you n # Image based provisioning with Solar -* In `vagrant-setting.yaml_defaults` file uncomment `preprovisioned: false` line. +* In `vagrant-setting.yaml_defaults` or `vagrant-settings.yaml` file uncomment `preprovisioned: false` line. * Run `vagrant up`, it will take some time because it builds image for bootstrap. * Currently in order to perform provisioning, pre-built images from Fuel can be used * Download images [using this link](https://drive.google.com/file/d/0B7I3b5vI7ZYXM0FPTDJEdjg0Qnc/view). 
From 06defa9601576c634da64bb37ed7562bbf922547 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C4=99drzej=20Nowak?= Date: Tue, 20 Oct 2015 19:24:54 +0200 Subject: [PATCH 11/15] set target in orch to location_id if not given --- solar/solar/orchestration/graph.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/solar/solar/orchestration/graph.py b/solar/solar/orchestration/graph.py index 8370e289..3432f688 100644 --- a/solar/solar/orchestration/graph.py +++ b/solar/solar/orchestration/graph.py @@ -25,7 +25,7 @@ from collections import Counter from solar.interfaces.db import get_db - +from solar.core import resource db = get_db() @@ -40,6 +40,9 @@ def save_graph(graph): db.create_relation_str(uid, n, type_=db.RELATION_TYPES.graph_to_node) for u, v, properties in graph.edges(data=True): + if not 'target' in properties: + resource_name = u.split('.', 1)[0] + properties['target'] = resource.load(resource_name).args['location_id'] type_ = db.RELATION_TYPES.plan_edge.name + ':' + uid db.create_relation_str(u, v, properties, type_=type_) From 67700d1e8bb482a039d7e493e863284a31a59a8a Mon Sep 17 00:00:00 2001 From: Jedrzej Nowak Date: Tue, 20 Oct 2015 20:22:53 +0200 Subject: [PATCH 12/15] make target=location_id on graph creation --- solar/solar/events/controls.py | 6 ++++++ solar/solar/orchestration/graph.py | 5 +---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/solar/solar/events/controls.py b/solar/solar/events/controls.py index 19093335..19432359 100644 --- a/solar/solar/events/controls.py +++ b/solar/solar/events/controls.py @@ -96,8 +96,11 @@ class React(Event): if self.parent_node in changes_graph: if self.child_node not in changes_graph: + from solar.core import resource + location_id = resource.load(self.child).args['location_id'] changes_graph.add_node( self.child_node, status='PENDING', + target=location_id, errmsg=None, type='solar_resource', args=[self.child, self.child_action]) @@ -112,7 +115,10 @@ class StateChange(Event): def 
insert(self, changed_resources, changes_graph): changed_resources.append(self.parent) + from solar.core import resource + location_id = resource.load(self.parent).args['location_id'] changes_graph.add_node( self.parent_node, status='PENDING', + target=location_id, errmsg=None, type='solar_resource', args=[self.parent, self.parent_action]) diff --git a/solar/solar/orchestration/graph.py b/solar/solar/orchestration/graph.py index 3432f688..8370e289 100644 --- a/solar/solar/orchestration/graph.py +++ b/solar/solar/orchestration/graph.py @@ -25,7 +25,7 @@ from collections import Counter from solar.interfaces.db import get_db -from solar.core import resource + db = get_db() @@ -40,9 +40,6 @@ def save_graph(graph): db.create_relation_str(uid, n, type_=db.RELATION_TYPES.graph_to_node) for u, v, properties in graph.edges(data=True): - if not 'target' in properties: - resource_name = u.split('.', 1)[0] - properties['target'] = resource.load(resource_name).args['location_id'] type_ = db.RELATION_TYPES.plan_edge.name + ':' + uid db.create_relation_str(u, v, properties, type_=type_) From 169dbc3ad3c8810fbd09bd078b21560ac4d280e3 Mon Sep 17 00:00:00 2001 From: Jedrzej Nowak Date: Tue, 20 Oct 2015 23:04:40 +0200 Subject: [PATCH 13/15] Revert default limits --- solar/solar/orchestration/limits.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solar/solar/orchestration/limits.py b/solar/solar/orchestration/limits.py index fefe5f7c..cae4f6c6 100644 --- a/solar/solar/orchestration/limits.py +++ b/solar/solar/orchestration/limits.py @@ -72,5 +72,5 @@ def target_based_rule(dg, inprogress, item, limit=1): return limit > target_count -def items_rule(dg, inprogress, item, limit=1): +def items_rule(dg, inprogress, item, limit=100): return len(inprogress) < limit From e93556965d2ff79f5a146ba61fdf20a3750c622e Mon Sep 17 00:00:00 2001 From: Jedrzej Nowak Date: Wed, 21 Oct 2015 11:34:09 +0200 Subject: [PATCH 14/15] Added comments about circular imports --- 
 solar/solar/events/controls.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/solar/solar/events/controls.py b/solar/solar/events/controls.py index 19432359..f31442ea 100644 --- a/solar/solar/events/controls.py +++ b/solar/solar/events/controls.py @@ -96,6 +96,7 @@ class React(Event): if self.parent_node in changes_graph: if self.child_node not in changes_graph: + # TODO: solve this circular import problem from solar.core import resource location_id = resource.load(self.child).args['location_id'] changes_graph.add_node( @@ -115,6 +116,7 @@ class StateChange(Event): def insert(self, changed_resources, changes_graph): changed_resources.append(self.parent) + # TODO: solve this circular import problem from solar.core import resource location_id = resource.load(self.parent).args['location_id'] changes_graph.add_node( From 79c3138e69e77e2eb991705bbeb6058069fda167 Mon Sep 17 00:00:00 2001 From: Jedrzej Nowak Date: Wed, 21 Oct 2015 11:49:32 +0200 Subject: [PATCH 15/15] Added comment about exceptions in controls --- solar/solar/events/controls.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/solar/solar/events/controls.py b/solar/solar/events/controls.py index f31442ea..9412af76 100644 --- a/solar/solar/events/controls.py +++ b/solar/solar/events/controls.py @@ -98,7 +98,13 @@ class React(Event): if self.child_node not in changes_graph: # TODO: solve this circular import problem from solar.core import resource - location_id = resource.load(self.child).args['location_id'] + try: + loaded_resource = resource.load(self.child) + except KeyError: + # the ORM raises KeyError when the node is not backed by a resource + location_id = None + else: + location_id = loaded_resource.args['location_id'] changes_graph.add_node( self.child_node, status='PENDING', target=location_id, @@ -118,7 +124,13 @@ class StateChange(Event): def insert(self, changed_resources, changes_graph): changed_resources.append(self.parent) # TODO: solve this circular import problem from solar.core import resource - location_id 
= resource.load(self.parent).args['location_id'] + try: + loaded_resource = resource.load(self.parent) + except KeyError: + # the ORM raises KeyError when the node is not backed by a resource + location_id = None + else: + location_id = loaded_resource.args['location_id'] changes_graph.add_node( self.parent_node, status='PENDING', target=location_id,