Revert "Remove deprecated 'fuel-bootstrap-image-builder'"

Since some functionality that Ironic needs is still missing from
'fuel-bootstrap', we have to revert fuel-bootstrap-image-builder.
Once the bugs in fuel-bootstrap are fixed and fuel-bootstrap is properly
tested with Ironic, Ironic can be switched over to fuel-bootstrap.
Related-Bug: #1527605
Related-Bug: #1527608

This reverts commit 74e9affd54.

Change-Id: Ic8738fdf386ddb4770a164b599e0c3ac609fb208
Vasyl Saienko 2015-12-18 13:37:27 +00:00
parent 3faa824728
commit 5d4cf85fe3
23 changed files with 1320 additions and 1 deletion

@@ -1,5 +1,7 @@
.PHONY: bootstrap clean clean-bootstrap
include $(SOURCE_DIR)/bootstrap/ubuntu.mk
bootstrap: $(ARTS_DIR)/$(BOOTSTRAP_ART_NAME)
$(ARTS_DIR)/$(BOOTSTRAP_ART_NAME): \

bootstrap/ubuntu.mk
@@ -0,0 +1,13 @@
.PHONY: bootstrap-ubuntu
$(BUILD_DIR)/bootstrap/fuel-bootstrap-image-builder-rpm.done: top_builddir:=$(BUILD_DIR)/ubuntu-bootstrap/fuel-bootstrap-image-builder
$(BUILD_DIR)/bootstrap/fuel-bootstrap-image-builder-rpm.done: $(SOURCE_DIR)/fuel-bootstrap-image-builder/Makefile
mkdir -p $(top_builddir)
$(MAKE) -C $(SOURCE_DIR)/fuel-bootstrap-image-builder rpm top_builddir=$(top_builddir) VERSION=$(PRODUCT_VERSION).0
mkdir -p $(BUILD_DIR)/packages/rpm/RPMS
find $(top_builddir) -type f -name '*.rpm' | \
xargs cp -a --target-directory=$(BUILD_DIR)/packages/rpm/RPMS
$(ACTION.TOUCH)
bootstrap-ubuntu: $(BUILD_DIR)/bootstrap/fuel-bootstrap-image-rpm.done

@@ -0,0 +1,52 @@
VERSION?=8.0.0
top_srcdir:=$(shell pwd)
ubuntu_DATA:=$(shell cd $(top_srcdir) && find share -type f)
top_builddir?=$(shell pwd)
-include config.mk
PREFIX?=/usr
all:
@echo nop
install:
install -d -m 755 $(DESTDIR)$(PREFIX)/bin
install -d -m 755 $(DESTDIR)$(PREFIX)/share/fuel-bootstrap-image
install -m 755 -t $(DESTDIR)$(PREFIX)/bin $(top_srcdir)/bin/fuel-bootstrap-image
install -m 755 -t $(DESTDIR)$(PREFIX)/bin $(top_srcdir)/bin/fuel-bootstrap-image-set
tar cf - -C $(top_srcdir) share | tar xf - -C $(DESTDIR)$(PREFIX)
dist: $(top_builddir)/fuel-bootstrap-image-builder-$(VERSION).tar.gz
$(top_builddir)/fuel-bootstrap-image-builder-$(VERSION).tar.gz: STAGEDIR:=$(top_builddir)/dist/fuel-bootstrap-image-builder
$(top_builddir)/fuel-bootstrap-image-builder-$(VERSION).tar.gz: bin/fuel-bootstrap-image $(ubuntu_DATA) Makefile configure
mkdir -p $(STAGEDIR)/share
mkdir -p $(STAGEDIR)/bin
tar cf - -C $(top_srcdir) bin share | tar xf - -C $(STAGEDIR)
cp -a $(top_srcdir)/Makefile $(top_srcdir)/configure $(top_srcdir)/fuel-bootstrap-image-builder.spec $(STAGEDIR)
tar czf $@.tmp -C $(dir $(STAGEDIR)) $(notdir $(STAGEDIR))
mv $@.tmp $@
rpm: SANDBOX:=$(top_builddir)/rpmbuild
rpm: $(top_builddir)/fuel-bootstrap-image-builder-$(VERSION).tar.gz fuel-bootstrap-image-builder.spec
rm -rf $(SANDBOX)
mkdir -p $(SANDBOX)/SOURCES $(SANDBOX)/SPECS $(SANDBOX)/tmp
cp -a $< $(SANDBOX)/SOURCES
cp -a $(top_srcdir)/fuel-bootstrap-image-builder.spec $(SANDBOX)/SPECS
fakeroot rpmbuild --nodeps \
--define '_tmppath $(SANDBOX)/tmp' \
--define '_topdir $(SANDBOX)' \
--define 'version $(VERSION)' \
-ba $(SANDBOX)/SPECS/fuel-bootstrap-image-builder.spec
clean:
-@rm -f $(top_builddir)/config.mk
distclean: clean
-@rm -f $(top_builddir)/fuel-bootstrap-image-builder-$(VERSION).tar.gz
-@rm -rf $(top_builddir)/rpmbuild
-@rm -rf $(top_builddir)/dist
.PHONY: all install dist clean rpm

@@ -0,0 +1,438 @@
#!/bin/sh
set -ex
MYSELF="${0##*/}"
bindir="${0%/*}"
datadir="${bindir%/*}/share/fuel-bootstrap-image"
global_conf="/etc/fuel-bootstrap-image.conf"
[ -r "$global_conf" ] && . "$global_conf"
[ -z "$MOS_VERSION" ] && MOS_VERSION="8.0"
[ -z "$DISTRO_RELEASE" ] && DISTRO_RELEASE="trusty"
[ -z "$MIRROR_DISTRO" ] && MIRROR_DISTRO="http://archive.ubuntu.com/ubuntu"
[ -z "$MIRROR_MOS" ] && MIRROR_MOS="http://mirror.fuel-infra.org/mos-repos/ubuntu/$MOS_VERSION"
[ -z "$KERNEL_FLAVOR" ] && KERNEL_FLAVOR="-generic-lts-trusty"
[ -z "$ARCH" ] && ARCH="amd64"
[ -z "$DESTDIR" ] && DESTDIR="/var/www/nailgun/bootstrap/ubuntu"
if [ -z "${EXTRA_CONF_FILES+x}" ]; then
EXTRA_CONF_FILES="$datadir/ubuntu/files"
fi
# Packages required for the master node to discover a bootstrap node
BOOTSTRAP_FUEL_PKGS_DFLT="openssh-client openssh-server ntp mcollective nailgun-agent nailgun-mcagents network-checker fuel-agent"
[ -z "$BOOTSTRAP_FUEL_PKGS" ] && BOOTSTRAP_FUEL_PKGS="$BOOTSTRAP_FUEL_PKGS_DFLT"
if [ -n "$http_proxy" ]; then
export HTTP_PROXY="$http_proxy"
elif [ -n "$HTTP_PROXY" ]; then
export http_proxy="$HTTP_PROXY"
fi
# Kernel, firmware, live boot
BOOTSTRAP_PKGS="ubuntu-minimal live-boot live-boot-initramfs-tools linux-image${KERNEL_FLAVOR} linux-firmware linux-firmware-nonfree"
# compress initramfs with xz, make squashfs root filesystem image
BOOTSTRAP_PKGS="$BOOTSTRAP_PKGS xz-utils squashfs-tools"
# Smaller tools that stand in for the standard ones.
# - mdadm depends on mail-transport-agent, default one is postfix => use msmtp instead
BOOTSTRAP_PKGS="$BOOTSTRAP_PKGS msmtp-mta"
# CentOS 7 has a $PATH which differs from the Ubuntu one.
# This eliminates the difference.
export PATH=$PATH:/sbin:/bin
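# Verify the MOS repository is reachable, then write the chroot's sources.list,
# APT pinning preferences and (if a proxy is set) the proxy configuration.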
apt_setup ()
{
local root="$1"
local sources_list="${root}/etc/apt/sources.list"
local apt_prefs="${root}/etc/apt/preferences"
local mos_codename="mos${MOS_VERSION}"
local release_file="$MIRROR_MOS/dists/$mos_codename/Release"
if ! wget -q -O /dev/null "$release_file" 2>/dev/null; then
cat >&2 <<-EOF
$MYSELF: broken MOS repo: no $release_file
EOF
exit 2
fi
mkdir -p "${sources_list%/*}"
cat > "$sources_list" <<-EOF
deb $MIRROR_DISTRO ${DISTRO_RELEASE} main universe multiverse restricted
deb $MIRROR_DISTRO ${DISTRO_RELEASE}-security main universe multiverse restricted
deb $MIRROR_DISTRO ${DISTRO_RELEASE}-updates main universe multiverse restricted
deb $MIRROR_MOS ${mos_codename} main
deb $MIRROR_MOS ${mos_codename}-security main
deb $MIRROR_MOS ${mos_codename}-updates main
deb $MIRROR_MOS ${mos_codename}-holdback main
EOF
if [ -n "$EXTRA_DEB_REPOS" ]; then
l="$EXTRA_DEB_REPOS"
IFS='|'
set -- $l
unset IFS
for repo; do
echo "$repo"
done >> "$sources_list"
fi
cat > "$apt_prefs" <<-EOF
Package: *
Pin: release o=Mirantis, n=mos${MOS_VERSION}
Pin-Priority: 1101
Package: *
Pin: release o=Mirantis, n=${DISTRO_RELEASE}
Pin-Priority: 1101
EOF
if [ -n "$HTTP_PROXY" ]; then
cat > "$root/etc/apt/apt.conf.d/01mirantis-use-proxy" <<-EOF
Acquire::http::Proxy "$HTTP_PROXY";
EOF
fi
}
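# Run apt-get inside the chroot with a non-interactive, C-locale environment.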
run_apt_get ()
{
local root="$1"
shift
chroot "$root" env \
LC_ALL=C \
DEBIAN_FRONTEND=noninteractive \
DEBCONF_NONINTERACTIVE_SEEN=true \
TMPDIR=/tmp \
TMP=/tmp \
apt-get $@
}
dpkg_is_too_old ()
{
# XXX: dpkg-deb versions older than 1.15.6 can't handle data.tar.xz
# (which is the default payload of Ubuntu packages)
# Such an ancient version of dpkg is shipped with CentOS 6.[56]
local dpkg_version
local dpkg_major_version
local dpkg_minor_version
local dpkg_patch_version
if ! dpkg-deb --help >/dev/null 2>&1; then
return 0
fi
dpkg_version=`dpkg-deb --version | sed -rne '1 s/^.*\s+version\s+([0-9]+)\.([0-9]+)\.([0-9]+).*/\1.\2.\3/p'`
[ -z "$dpkg_version" ] && return 0
IFS='.'
set -- $dpkg_version
unset IFS
dpkg_major_version="$1"
dpkg_minor_version="$2"
dpkg_patch_version="$3"
if [ $dpkg_major_version -le 1 ] && [ $dpkg_minor_version -le 15 ] && [ $dpkg_patch_version -lt 6 ]; then
echo "DEBUG: $MYSELF: dpkg is too old, using ar to unpack debian packages" >&2
return 0
fi
return 1
}
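# Debootstrap a minimal $DISTRO_RELEASE/$ARCH system into the chroot
# (GPG checks disabled, 'ar' extractor used when dpkg is too old).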
run_debootstrap ()
{
local root="$1"
[ -z "$root" ] && exit 1
local insecure="--no-check-gpg"
local extractor=''
if dpkg_is_too_old; then
# Ubuntu packages use data.tar.xz payload. Ancient versions of
# dpkg (in particular the ones shipped with CentOS 6.x) can't
# handle such packages. Tell debootstrap to use ar instead to
# avoid the failure.
extractor='--extractor=ar'
fi
env \
LC_ALL=C \
DEBIAN_FRONTEND=noninteractive \
DEBCONF_NONINTERACTIVE_SEEN=true \
debootstrap $insecure $extractor --arch=${ARCH} ${DISTRO_RELEASE} "$root" $MIRROR_DISTRO
}
install_packages ()
{
local root="$1"
shift
echo "INFO: $MYSELF: installing pkgs: $*" >&2
run_apt_get "$root" install --yes $@
}
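# Update the package indexes and dist-upgrade the chroot, mounting /proc first if needed.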
upgrade_chroot ()
{
local root="$1"
run_apt_get "$root" update
if ! mountpoint -q "$root/proc"; then
mount -t proc bootstrapproc "$root/proc"
fi
run_apt_get "$root" dist-upgrade --yes
}
add_local_mos_repo ()
{
# we need the local APT repo (/var/www/nailgun/ubuntu/x86_64)
# before web server is up and running => use bind mount
local root="$1"
# TODO(asheplyakov): use proper arch name (amd64)
local local_repo="/var/www/nailgun/ubuntu/x86_64"
local path_in_chroot="/tmp/local-apt"
local source_parts_d="${root}/etc/apt/sources.list.d"
# TODO(asheplyakov): update the codename after repo get fixed
local mos_codename="mos${MOS_VERSION}"
mkdir -p "${root}${path_in_chroot}" "${source_parts_d}"
mount -o bind "$local_repo" "${root}${path_in_chroot}"
mount -o remount,ro,bind "${root}${path_in_chroot}"
cat > "${source_parts_d}/nailgun-local.list" <<-EOF
deb file://${path_in_chroot} ${mos_codename} main
EOF
}
allow_insecure_apt ()
{
local root="$1"
local conflet="${root}/etc/apt/apt.conf.d/02mirantis-insecure-apt"
mkdir -p "${conflet%/*}"
echo 'APT::Get::AllowUnauthenticated 1;' > "$conflet"
}
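# Install a policy-rc.d that prevents packages from starting services in the chroot.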
suppress_services_start ()
{
local root="$1"
local policy_rc="$root/usr/sbin/policy-rc.d"
mkdir -p "${policy_rc%/*}"
cat > "$policy_rc" <<-EOF
#!/bin/sh
# suppress services start in the staging chroot
exit 101
EOF
chmod 755 "$policy_rc"
}
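# Back up the chroot's resolv.conf and hosts, then copy in the host's versions
# so name resolution works during the image build.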
propagate_host_resolv_conf ()
{
local root="$1"
mkdir -p "$root/etc"
for conf in "/etc/resolv.conf" "/etc/hosts"; do
if [ -e "${root}${conf}" ]; then
cp -a "${root}${conf}" "${root}${conf}.bak"
fi
cp -a "${conf}" "${root}${conf}"
done
}
restore_resolv_conf ()
{
local root="$1"
for conf in "/etc/resolv.conf" "/etc/hosts"; do
if [ -e "${root}${conf}.bak" ]; then
rm -f "${root}${conf}"
cp -a "${root}${conf}.bak" "${root}${conf}"
fi
done
}
make_utf8_locale ()
{
local root="$1"
chroot "$root" /bin/sh -c "locale-gen en_US.UTF-8 && dpkg-reconfigure locales"
}
# XXX: CentOS version of debootstrap produces a broken /dev:
# /dev/fd is a directory instead of a symlink to /proc/self/fd
dev_fixup ()
{
local root="$1"
if [ -z "$root" ]; then
echo "*** Error: $MYSELF: dev_fixup: \$root is not specified" >&2
exit 1
fi
mkdir -p -m755 "$root/dev"
if [ ! -L "$root/dev/fd" ]; then
rm -rf "$root/dev/fd"
# Ask MAKEDEV to re-create /dev/fd, /dev/stdin, etc
chroot "$root" /bin/sh -c "cd /dev && MAKEDEV fd"
fi
if [ ! -c "$root/dev/null" ]; then
# basic device nodes are missing => create them
chroot "$root" /bin/sh -c "cd /dev && MAKEDEV std"
fi
}
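# Overlay the extra configuration files into the chroot and set root's
# password hash in /etc/shadow.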
copy_conf_files ()
{
local root="$1"
if [ -n "$EXTRA_CONF_FILES" ]; then
rsync -rlptDK "${EXTRA_CONF_FILES%/}/" "${root%/}"
fi
sed -i $root/etc/shadow -e '/^root/c\root:$6$oC7haQNQ$LtVf6AI.QKn9Jb89r83PtQN9fBqpHT9bAFLzy.YVxTLiFgsoqlPY3awKvbuSgtxYHx4RUcpUqMotp.WZ0Hwoj.:15441:0:99999:7:::'
}
install_ssh_keys ()
{
local root="$1"
shift
if [ -z "$*" ]; then
echo "*** Error: $MYSELF: no ssh keys specified" >&2
exit 1
fi
local authorized_keys="$root/root/.ssh/authorized_keys"
local dot_ssh_dir="${authorized_keys%/*}"
if [ ! -d "${dot_ssh_dir}" ]; then
mkdir -p -m700 "${dot_ssh_dir}"
fi
for key; do
if [ ! -r "$key" ]; then
echo "*** Error: $MYSELF: no such file: $key" >&2
exit 1
fi
done
cat $@ > "$authorized_keys"
chmod 640 "$authorized_keys"
}
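# Stop any processes still running in the chroot, unmount the local APT
# bind mount and remove package caches, logs and temporary files.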
cleanup_chroot ()
{
local root="$1"
[ -z "$root" ] && exit 1
signal_chrooted_processes "$root" SIGTERM
signal_chrooted_processes "$root" SIGKILL
umount "${root}/tmp/local-apt" 2>/dev/null || umount -l "${root}/tmp/local-apt"
rm -f "${root}/etc/apt/sources.list.d/nailgun-local.list"
rm -rf $root/var/cache/apt/archives/*.deb
rm -f $root/etc/apt/apt.conf.d/01mirantis-use-proxy.conf
rm -f $root/var/log/bootstrap.log
rm -rf $root/tmp/*
rm -rf $root/run/*
}
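# Switch initramfs compression from gzip to xz and regenerate the initramfs inside the chroot.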
recompress_initramfs ()
{
local root="$1"
local initramfs_conf="$root/etc/initramfs-tools/initramfs.conf"
sed -i $initramfs_conf -re 's/COMPRESS\s*=\s*gzip/COMPRESS=xz/'
rm -f $root/boot/initrd*
chroot "$root" \
env \
LC_ALL=C \
DEBIAN_FRONTEND=noninteractive \
DEBCONF_NONINTERACTIVE_SEEN=true \
TMPDIR=/tmp \
TMP=/tmp \
update-initramfs -c -k all
}
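# Move the kernel and initramfs into DESTDIR and build the squashfs root
# filesystem image from inside the chroot.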
mk_squashfs_image ()
{
local root="$1"
local tmp="$$"
[ -d "$DESTDIR" ] || mkdir -p "$DESTDIR"
cp -a $root/boot/initrd* $DESTDIR/initramfs.img.${tmp}
cp -a $root/boot/vmlinuz* $DESTDIR/linux.${tmp}
rm -f $root/boot/initrd*
rm -f $root/boot/vmlinuz*
# run mksquashfs inside a chroot (Ubuntu kernel will be able to
# mount an image produced by Ubuntu squashfs-tools)
mount -t tmpfs -o rw,nodev,nosuid,noatime,mode=0755,size=4M mnt${tmp} "$root/mnt"
mkdir -p "$root/mnt/src" "$root/mnt/dst"
mount -o bind "$root" "$root/mnt/src"
mount -o remount,bind,ro "$root/mnt/src"
mount -o bind "$DESTDIR" "$root/mnt/dst"
if ! mountpoint -q "$root/proc"; then
mount -t proc sandboxproc "$root/proc"
fi
chroot "$root" mksquashfs /mnt/src /mnt/dst/root.squashfs.${tmp} -comp xz -no-progress -noappend
mv $DESTDIR/initramfs.img.${tmp} $DESTDIR/initramfs.img
mv $DESTDIR/linux.${tmp} $DESTDIR/linux
mv $DESTDIR/root.squashfs.${tmp} $DESTDIR/root.squashfs
chmod 644 $DESTDIR/initramfs.img $DESTDIR/linux $DESTDIR/root.squashfs
umount "$root/mnt/dst"
umount "$root/mnt/src"
umount "$root/mnt"
}
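# Full build sequence: debootstrap the chroot, configure APT, install the
# bootstrap packages and produce the kernel/initramfs/squashfs artifacts.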
build_image ()
{
local root="$1"
chmod 755 "$root"
suppress_services_start "$root"
run_debootstrap "$root"
dev_fixup "$root"
suppress_services_start "$root"
propagate_host_resolv_conf "$root"
make_utf8_locale "$root"
apt_setup "$root"
add_local_mos_repo "$root"
allow_insecure_apt "$root"
upgrade_chroot "$root"
install_packages "$root" $BOOTSTRAP_PKGS $BOOTSTRAP_FUEL_PKGS
recompress_initramfs "$root"
copy_conf_files "$root"
if [ -n "$BOOTSTRAP_SSH_KEYS" ]; then
install_ssh_keys "$root" $BOOTSTRAP_SSH_KEYS
else
cat >&2 <<-EOF
$MYSELF: Warning: no ssh keys specified
$MYSELF: bootstrap nodes won't be available via ssh
EOF
fi
restore_resolv_conf "$root"
cleanup_chroot "$root"
mk_squashfs_image "$root"
}
root=`mktemp -d --tmpdir fuel-bootstrap-image.XXXXXXXXX`
main ()
{
build_image "$root"
}
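# Send the given signal to every process whose root is the chroot, retrying
# until none are left or max_attempts is reached.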
signal_chrooted_processes ()
{
local root="$1"
local signal="${2:-SIGTERM}"
local max_attempts=10
local timeout=2
local count=0
local found_processes
[ ! -d "$root" ] && return 0
while [ $count -lt $max_attempts ]; do
found_processes=''
for pid in `fuser $root 2>/dev/null`; do
[ "$pid" = "kernel" ] && continue
if [ "`readlink /proc/$pid/root`" = "$root" ]; then
found_processes='yes'
kill "-${signal}" $pid
fi
done
[ -z "$found_processes" ] && break
count=$((count+1))
sleep $timeout
done
}
final_cleanup ()
{
signal_chrooted_processes "$root" SIGTERM
signal_chrooted_processes "$root" SIGKILL
for mnt in /tmp/local-apt /mnt/dst /mnt/src /mnt /proc; do
if mountpoint -q "${root}${mnt}"; then
umount "${root}${mnt}" || umount -l "${root}${mnt}" || true
fi
done
if [ -z "$SAVE_TEMPS" ]; then
rm -rf "$root"
fi
}
trap final_cleanup 0
trap final_cleanup HUP TERM INT QUIT
main

@@ -0,0 +1,83 @@
#!/bin/sh
# Script for switching between the Ubuntu and CentOS based bootstrap images.
# Usage: fuel-bootstrap-image-set centos|ubuntu
# FIXME(azvyagintsev) https://bugs.launchpad.net/fuel/+bug/1522066
set -e
MYSELF="${0##*/}"
ASTUTE_YAML="/etc/fuel/astute.yaml"
cobbler_manifest="/etc/puppet/modules/nailgun/examples/cobbler-only.pp"
astute_manifest="/etc/puppet/modules/nailgun/examples/astute-only.pp"
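# Apply a puppet manifest inside the given container; with --detailed-exitcodes,
# puppet exit codes 0 and 2 both mean success.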
run_puppet () {
local container="$1"
local manifest="$2"
local ret=''
set +e
dockerctl shell "$container" puppet apply --detailed-exitcodes -dv "$manifest"
ret=$?
set -e
if [ "$ret" = "0" ] || [ "$ret" = "2" ]; then
return 0
else
cat >&2 <<-EOF
$MYSELF: puppet apply $manifest failed: exit code $ret
$MYSELF: container: $container
EOF
exit 1
fi
}
maybe_restart_dnsmasq () {
if ! dockerctl shell cobbler service dnsmasq status >/dev/null; then
dockerctl shell cobbler service dnsmasq restart
fi
}
verify_bootstrap_flavor () {
local flavor="$1"
if [ -z "$flavor" ]; then
cat >&2 <<-EOF
$MYSELF: error: no bootstrap image specified
Usage: $MYSELF centos|ubuntu
EOF
exit 1
fi
case "$flavor" in
centos|CentOS)
flavor='centos'
;;
ubuntu|Ubuntu)
flavor='ubuntu'
;;
*)
cat >&2 <<-EOF
$MYSELF: error: unknown bootstrap image: $flavor
$MYSELF: available bootstrap images: ubuntu, centos
EOF
exit 1
;;
esac
}
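# Record the selected bootstrap flavor in the BOOTSTRAP section of astute.yaml
# using fuelmenu's Settings helper.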
write_astute_yaml () {
local flavor="$1"
python <<-PYEOF
from fuelmenu.fuelmenu import Settings
conf = Settings().read("$ASTUTE_YAML").get('BOOTSTRAP', {})
conf['flavor'] = "$flavor"
Settings().write({'BOOTSTRAP': conf}, outfn="$ASTUTE_YAML", defaultsfile=None)
PYEOF
}
switch_bootstrap () {
local flavor="$1"
verify_bootstrap_flavor "$flavor"
write_astute_yaml "$flavor"
run_puppet cobbler "$cobbler_manifest"
# XXX: sometimes dnsmasq stops after cobbler sync
maybe_restart_dnsmasq
run_puppet astute "$astute_manifest"
dockerctl shell astute service astute restart
}
switch_bootstrap $1

fuel-bootstrap-image-builder/configure
@@ -0,0 +1,21 @@
#!/bin/sh
set -e
# Stub configure script to make rpmbuild happy
PREFIX=''
for arg; do
case $arg in
--prefix)
shift
PREFIX="$arg"
;;
--prefix=*)
PREFIX="${arg##--prefix=}"
;;
esac
done
cat > config.mk <<-EOF
PREFIX:=${PREFIX:-/usr}
EOF

@@ -0,0 +1,38 @@
%define name fuel-bootstrap-image-builder
%{!?version: %define version 8.0.0}
%{!?release: %define release 1}
Summary: Fuel bootstrap image generator
Name: %{name}
Version: %{version}
Release: %{release}
URL: http://github.com/asheplyakov/fuel-bootstrap-image
Source0: fuel-bootstrap-image-builder-%{version}.tar.gz
License: Apache
BuildRoot: %{_tmppath}/%{name}-%{version}-buildroot
Prefix: %{_prefix}
Requires: debootstrap, wget
BuildArch: noarch
%description
Fuel bootstrap image generator package
%prep
%autosetup -n %{name}
%build
%configure
%install
%make_install
mkdir -p %{buildroot}/var/www/nailgun/bootstrap/ubuntu
%clean
rm -rf $RPM_BUILD_ROOT
%files
%defattr(-,root,root)
%{_bindir}/*
%{_datadir}/fuel-bootstrap-image/*
%dir /var/www/nailgun/bootstrap/ubuntu

@@ -0,0 +1,16 @@
[options]
broken_system_clock = true
[problems]
# Superblock last mount time is in the future (PR_0_FUTURE_SB_LAST_MOUNT).
0x000031 = {
preen_ok = true
preen_nomessage = true
}
# Superblock last write time is in the future (PR_0_FUTURE_SB_LAST_WRITE).
0x000032 = {
preen_ok = true
preen_nomessage = true
}

@@ -0,0 +1,28 @@
/var/log/cron
/var/log/maillog
/var/log/messages
/var/log/secure
/var/log/spooler
/var/log/mcollective.log
/var/log/nailgun-agent.log
{
# This file is used for daily log rotations, do not use size options here
sharedscripts
daily
# rotate only if 30M size or bigger
minsize 30M
maxsize 50M
# truncate file, do not delete & recreate
copytruncate
# keep logs for XXX rotations
rotate 3
# compression will be postponed to the next rotation, if uncommented
compress
# ignore missing files
missingok
# do not rotate empty files
notifempty
postrotate
/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true
endscript
}

@@ -0,0 +1,27 @@
main_collective = mcollective
collectives = mcollective
libdir = /usr/share/mcollective/plugins
logfile = /var/log/mcollective.log
loglevel = debug
direct_addressing = 1
daemonize = 0
# Set TTL to 1.5 hours
ttl = 5400
# Plugins
securityprovider = psk
plugin.psk = unset
connector = rabbitmq
plugin.rabbitmq.vhost = mcollective
plugin.rabbitmq.pool.size = 1
plugin.rabbitmq.pool.1.host =
plugin.rabbitmq.pool.1.port = 61613
plugin.rabbitmq.pool.1.user = mcollective
plugin.rabbitmq.pool.1.password = marionette
plugin.rabbitmq.heartbeat_interval = 30
# Facts
factsource = yaml
plugin.yaml = /etc/mcollective/facts.yaml

@@ -0,0 +1 @@
options mlx4_core port_type_array=2,2

@@ -0,0 +1,6 @@
#!/bin/sh -e
fix-configs-on-startup || true
flock -w 0 -o /var/lock/agent.lock -c "/usr/bin/nailgun-agent >> /var/log/nailgun-agent.log 2>&1" || true
touch /var/lock/subsys/local

@@ -0,0 +1,6 @@
# Log all messages with this template
$template CustomLog, "%$NOW%T%TIMESTAMP:8:$%Z %syslogseverity-text% %syslogtag% %msg%\n"
$ActionFileDefaultTemplate CustomLog
user.debug /var/log/messages

@@ -0,0 +1,20 @@
{
"watchlist": [
{"servers": [ {"host": "@MASTER_NODE_IP@"} ],
"watchfiles": [
{"tag": "bootstrap/dmesg", "files": ["/var/log/dmesg"]},
{"tag": "bootstrap/secure", "files": ["/var/log/secure"]},
{"tag": "bootstrap/messages", "files": ["/var/log/messages"]},
{"tag": "bootstrap/fuel-agent", "files": ["/var/log/fuel-agent.log"]},
{"tag": "bootstrap/mcollective", "log_type": "ruby",
"files": ["/var/log/mcollective.log"]},
{"tag": "bootstrap/agent", "log_type": "ruby",
"files": ["/var/log/nailgun-agent.log"]},
{"tag": "bootstrap/netprobe_sender", "log_type": "netprobe",
"files": ["/var/log/netprobe_sender.log"]},
{"tag": "bootstrap/netprobe_listener", "log_type": "netprobe",
"files": ["/var/log/netprobe_listener.log"]}
]
}
]
}

@@ -0,0 +1,20 @@
Protocol 2
SyslogFacility AUTHPRIV
PasswordAuthentication no
PubkeyAuthentication yes
ChallengeResponseAuthentication no
GSSAPIAuthentication no
UsePAM no
UseDNS no
# Accept locale-related environment variables
AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
AcceptEnv XMODIFIERS
Subsystem sftp /usr/lib/openssh/sftp-server
# Secure Ciphers and MACs
Ciphers aes256-ctr,aes192-ctr,aes128-ctr,arcfour256,arcfour128
MACs hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,hmac-sha1

@@ -0,0 +1,30 @@
#!/bin/sh
masternode_ip=`sed -rn 's/^.*url=http:\/\/(([0-9]{1,3}\.){3}[0-9]{1,3}).*$/\1/ p' /proc/cmdline`
mco_user=$(sed 's/\ /\n/g' /proc/cmdline | grep mco_user | awk -F\= '{print $2}')
mco_pass=$(sed 's/\ /\n/g' /proc/cmdline | grep mco_pass | awk -F\= '{print $2}')
[ -z "$mco_user" ] && mco_user="mcollective"
[ -z "$mco_pass" ] && mco_pass="marionette"
# Send logs to master node.
sed -i /etc/send2syslog.conf -re "s/@MASTER_NODE_IP@/$masternode_ip/"
/usr/bin/send2syslog.py -i < /etc/send2syslog.conf
# Set up NTP
# Disable panic about huge clock offset
sed -i '/^\s*tinker panic/ d' /etc/ntp.conf
sed -i '1 i tinker panic 0' /etc/ntp.conf
# Sync clock with master node
sed -i "/^\s*server\b/ d" /etc/ntp.conf
echo "server $masternode_ip burst iburst" >> /etc/ntp.conf
service ntp restart
# Update mcollective config
sed -i "s/^plugin.rabbitmq.pool.1.host\b.*$/plugin.rabbitmq.pool.1.host = $masternode_ip/" /etc/mcollective/server.cfg
sed -i "s/^plugin.rabbitmq.pool.1.user\b.*$/plugin.rabbitmq.pool.1.user = $mco_user/" /etc/mcollective/server.cfg
sed -i "s/^plugin.rabbitmq.pool.1.password\b.*$/plugin.rabbitmq.pool.1.password= $mco_pass/" /etc/mcollective/server.cfg
service mcollective restart

@@ -0,0 +1,505 @@
#!/usr/bin/env python
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from logging.handlers import SysLogHandler
from optparse import OptionParser
import os
import re
import signal
import sys
import time
# Add syslog levels to logging module.
logging.NOTICE = 25
logging.ALERT = 60
logging.EMERG = 70
logging.addLevelName(logging.NOTICE, 'NOTICE')
logging.addLevelName(logging.ALERT, 'ALERT')
logging.addLevelName(logging.EMERG, 'EMERG')
SysLogHandler.priority_map['NOTICE'] = 'notice'
SysLogHandler.priority_map['ALERT'] = 'alert'
SysLogHandler.priority_map['EMERG'] = 'emerg'
# Define data and message format according to RFC 5424.
rfc5424_format = '{version} {timestamp} {hostname} {appname} {procid}'\
' {msgid} {structured_data} {msg}'
date_format = '%Y-%m-%dT%H:%M:%SZ'
# Define global semaphore.
sending_in_progress = 0
# Define file types.
msg_levels = {'ruby': {'regex': '(?P<level>[DIWEF]), \[[0-9-]{10}T',
'levels': {'D': logging.DEBUG,
'I': logging.INFO,
'W': logging.WARNING,
'E': logging.ERROR,
'F': logging.FATAL
}
},
'syslog': {'regex': ('[0-9-]{10}T[0-9:]{8}Z (?P<level>'
'debug|info|notice|warning|err|crit|'
'alert|emerg)'),
'levels': {'debug': logging.DEBUG,
'info': logging.INFO,
'notice': logging.NOTICE,
'warning': logging.WARNING,
'err': logging.ERROR,
'crit': logging.CRITICAL,
'alert': logging.ALERT,
'emerg': logging.EMERG
}
},
'anaconda': {'regex': ('[0-9:]{8},[0-9]+ (?P<level>'
'DEBUG|INFO|WARNING|ERROR|CRITICAL)'),
'levels': {'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL
}
},
'netprobe': {'regex': ('[0-9-]{10} [0-9:]{8},[0-9]+ (?P<level>'
'DEBUG|INFO|WARNING|ERROR|CRITICAL)'),
'levels': {'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL
}
}
}
relevel_errors = {
'anaconda': [
{
'regex': 'Error downloading \
http://.*/images/(product|updates).img: HTTP response code said error',
'levelfrom': logging.ERROR,
'levelto': logging.WARNING
},
{
'regex': 'got to setupCdrom without a CD device',
'levelfrom': logging.ERROR,
'levelto': logging.WARNING
}
]
}
# Create a main logger.
logging.basicConfig(format='%(levelname)s: %(message)s')
main_logger = logging.getLogger()
main_logger.setLevel(logging.NOTSET)
class WatchedFile:
"""WatchedFile(filename) => Object that read lines from file if exist."""
def __init__(self, name):
self.name = name
self.fo = None
self.where = 0
def reset(self):
if self.fo:
self.fo.close()
self.fo = None
self.where = 0
def _checkRewrite(self):
try:
if os.stat(self.name)[6] < self.where:
self.reset()
except OSError:
self.close()
def readLines(self):
"""Return list of last append lines from file if exist."""
self._checkRewrite()
if not self.fo:
try:
self.fo = open(self.name, 'r')
except IOError:
return ()
lines = self.fo.readlines()
self.where = self.fo.tell()
return lines
def close(self):
self.reset()
class WatchedGroup:
"""Can send data from group of specified files to specified servers."""
def __init__(self, servers, files, name):
self.servers = servers
self.files = files
self.log_type = files.get('log_type', 'syslog')
self.name = name
self._createLogger()
def _createLogger(self):
self.watchedfiles = []
logger = logging.getLogger(self.name)
logger.setLevel(logging.NOTSET)
logger.propagate = False
# Create log formatter.
format_dict = {'version': '1',
'timestamp': '%(asctime)s',
'hostname': config['hostname'],
'appname': self.files['tag'],
'procid': '-',
'msgid': '-',
'structured_data': '-',
'msg': '%(message)s'
}
log_format = rfc5424_format.format(**format_dict)
formatter = logging.Formatter(log_format, date_format)
# Add log handler for each server.
for server in self.servers:
port = 'port' in server and server['port'] or 514
syslog = SysLogHandler((server["host"], port))
syslog.setFormatter(formatter)
logger.addHandler(syslog)
self.logger = logger
# Create WatchedFile objects from list of files.
for name in self.files['files']:
self.watchedfiles.append(WatchedFile(name))
def send(self):
"""Send append data from files to servers."""
for watchedfile in self.watchedfiles:
for line in watchedfile.readLines():
line = line.strip()
level = self._get_msg_level(line, self.log_type)
# Get rid of duplicated information in anaconda logs
line = re.sub(
msg_levels[self.log_type]['regex'] + "\s*:?\s?",
"",
line
)
# Ignore meaningless errors
try:
for r in relevel_errors[self.log_type]:
if level == r['levelfrom'] and \
re.match(r['regex'], line):
level = r['levelto']
except KeyError:
pass
self.logger.log(level, line)
main_logger and main_logger.log(
level,
'From file "%s" send: %s' % (watchedfile.name, line)
)
@staticmethod
def _get_msg_level(line, log_type):
if log_type in msg_levels:
msg_type = msg_levels[log_type]
regex = re.match(msg_type['regex'], line)
if regex:
return msg_type['levels'][regex.group('level')]
return logging.INFO
def sig_handler(signum, frame):
"""Send all new data when signal arrived."""
if not sending_in_progress:
send_all()
exit(signum)
else:
config['run_once'] = True
def send_all():
"""Send any updates."""
for group in watchlist:
group.send()
def main_loop():
"""Periodicaly call sendlogs() for each group in watchlist."""
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
while watchlist:
time.sleep(0.5)
send_all()
# If asked to run_once, exit now
if config['run_once']:
break
class Config:
"""Collection of config generation methods.
Usage: config = Config.getConfig()
"""
@classmethod
def getConfig(cls):
"""Generate config from command line arguments and config file."""
# example_config = {
# "daemon": True,
# "run_once": False,
# "debug": False,
# "watchlist": [
# {"servers": [ {"host": "localhost", "port": 514} ],
# "watchfiles": [
# {"tag": "anaconda",
# "log_type": "anaconda",
# "files": ["/tmp/anaconda.log",
# "/mnt/sysimage/root/install.log"]
# }
# ]
# }
# ]
# }
default_config = {"daemon": True,
"run_once": False,
"debug": False,
"hostname": cls._getHostname(),
"watchlist": []
}
# First use default config as running config.
config = dict(default_config)
# Get command line options and validate it.
cmdline = cls.cmdlineParse()[0]
# Check config file source and read it.
if cmdline.config_file or cmdline.stdin_config:
try:
if cmdline.stdin_config is True:
fo = sys.stdin
else:
fo = open(cmdline.config_file, 'r')
parsed_config = json.load(fo)
if cmdline.debug:
print(parsed_config)
except IOError: # Raised if IO operations failed.
main_logger.error("Can not read config file %s\n" %
cmdline.config_file)
exit(1)
except ValueError as e: # Raised if json parsing failed.
main_logger.error("Can not parse config file. %s\n" %
e.message)
exit(1)
# Validate config from config file.
cls.configValidate(parsed_config)
# Copy gathered config from config file to running config
# structure.
for key, value in parsed_config.items():
config[key] = value
else:
# If no config file specified use watchlist setting from
# command line.
watchlist = {"servers": [{"host": cmdline.host,
"port": cmdline.port}],
"watchfiles": [{"tag": cmdline.tag,
"log_type": cmdline.log_type,
"files": cmdline.watchfiles}]}
config['watchlist'].append(watchlist)
# Apply behavioural command line options to running config.
if cmdline.no_daemon:
config["daemon"] = False
if cmdline.run_once:
config["run_once"] = True
if cmdline.debug:
config["debug"] = True
return config
@staticmethod
def _getHostname():
"""Generate hostname by BOOTIF kernel option or use os.uname()."""
with open('/proc/cmdline') as fo:
cpu_cmdline = fo.readline().strip()
regex = re.search('(?<=BOOTIF=)([0-9a-fA-F-]*)', cpu_cmdline)
if regex:
mac = regex.group(0).upper()
return ''.join(mac.split('-'))
return os.uname()[1]
@staticmethod
def cmdlineParse():
"""Parse command line config options."""
parser = OptionParser()
parser.add_option("-c", "--config", dest="config_file", metavar="FILE",
help="Read config from FILE.")
parser.add_option("-i", "--stdin", dest="stdin_config", default=False,
action="store_true", help="Read config from Stdin.")
# FIXIT Add optionGroups.
parser.add_option("-r", "--run-once", dest="run_once",
action="store_true", help="Send all data and exit.")
parser.add_option("-n", "--no-daemon", dest="no_daemon",
action="store_true", help="Do not daemonize.")
parser.add_option("-d", "--debug", dest="debug",
action="store_true", help="Print debug messages.")
parser.add_option("-t", "--tag", dest="tag", metavar="TAG",
help="Set tag of sending messages as TAG.")
parser.add_option("-T", "--type", dest="log_type", metavar="TYPE",
default='syslog',
help="Set type of files as TYPE"
"(default: %default).")
parser.add_option("-f", "--watchfile", dest="watchfiles",
action="append",
metavar="FILE", help="Add FILE to watchlist.")
parser.add_option("-s", "--host", dest="host", metavar="HOSTNAME",
help="Set destination as HOSTNAME.")
parser.add_option("-p", "--port", dest="port", type="int", default=514,
metavar="PORT",
help="Set remote port as PORT (default: %default).")
options, args = parser.parse_args()
# Validate gathered options.
if options.config_file and options.stdin_config:
parser.error("You must not set both options --config"
" and --stdin at the same time.")
exit(1)
if ((options.config_file or options.stdin_config) and
(options.tag or options.watchfiles or options.host)):
main_logger.warning("If --config or --stdin is set up options"
" --tag, --watchfile, --type,"
" --host and --port will be ignored.")
if (not (options.config_file or options.stdin_config) and
not (options.tag and options.watchfiles and options.host)):
parser.error("Options --tag, --watchfile and --host"
" must be set up at the same time.")
exit(1)
return options, args
@staticmethod
def _checkType(value, value_type, value_name='', msg=None):
"""Check correctness of type of value and exit if not."""
if not isinstance(value, value_type):
message = msg or "Value %r in config has type %r but"\
" %r is expected." %\
(value_name, type(value).__name__, value_type.__name__)
main_logger.error(message)
exit(1)
@classmethod
def configValidate(cls, config):
"""Validate types and names of data items in config."""
cls._checkType(config, dict, msg='Config must be a dict.')
for key in ("daemon", "run_once", "debug"):
if key in config:
cls._checkType(config[key], bool, key)
key = "hostname"
if key in config:
cls._checkType(config[key], basestring, key)
key = "watchlist"
if key in config:
cls._checkType(config[key], list, key)
else:
main_logger.error("There must be key %r in config." % key)
exit(1)
for item in config["watchlist"]:
cls._checkType(item, dict, "watchlist[n]")
key, name = "servers", "watchlist[n] => servers"
if key in item:
cls._checkType(item[key], list, name)
else:
main_logger.error("There must be key %r in %s in config." %
(key, '"watchlist[n]" item'))
exit(1)
key, name = "watchfiles", "watchlist[n] => watchfiles"
if key in item:
cls._checkType(item[key], list, name)
else:
main_logger.error("There must be key %r in %s in config." %
(key, '"watchlist[n]" item'))
exit(1)
for item2 in item["servers"]:
cls._checkType(item2, dict, "watchlist[n] => servers[n]")
key, name = "host", "watchlist[n] => servers[n] => host"
if key in item2:
cls._checkType(item2[key], basestring, name)
else:
main_logger.error("There must be key %r in %s in config." %
(key,
'"watchlist[n] => servers[n]" item'))
exit(1)
key, name = "port", "watchlist[n] => servers[n] => port"
if key in item2:
cls._checkType(item2[key], int, name)
for item2 in item["watchfiles"]:
cls._checkType(item2, dict, "watchlist[n] => watchfiles[n]")
key, name = "tag", "watchlist[n] => watchfiles[n] => tag"
if key in item2:
cls._checkType(item2[key], basestring, name)
else:
main_logger.error("There must be key %r in %s in config." %
(key,
'"watchlist[n] => watchfiles[n]" item'))
exit(1)
key = "log_type"
name = "watchlist[n] => watchfiles[n] => log_type"
if key in item2:
cls._checkType(item2[key], basestring, name)
key, name = "files", "watchlist[n] => watchfiles[n] => files"
if key in item2:
cls._checkType(item2[key], list, name)
else:
main_logger.error("There must be key %r in %s in config." %
(key,
'"watchlist[n] => watchfiles[n]" item'))
exit(1)
for item3 in item2["files"]:
name = "watchlist[n] => watchfiles[n] => files[n]"
cls._checkType(item3, basestring, name)
# Create global config.
config = Config.getConfig()
# Create list of WatchedGroup objects with different log names.
watchlist = []
i = 0
for item in config["watchlist"]:
for files in item['watchfiles']:
watchlist.append(WatchedGroup(item['servers'], files, str(i)))
i = i + 1
# Fork and loop
if config["daemon"]:
if not os.fork():
# Redirect the standard I/O file descriptors to the specified file.
main_logger = None
DEVNULL = getattr(os, "devnull", "/dev/null")
os.open(DEVNULL, os.O_RDWR) # standard input (0)
os.dup2(0, 1) # Duplicate standard input to standard output (1)
os.dup2(0, 2) # Duplicate standard input to standard error (2)
main_loop()
sys.exit(1)
sys.exit(0)
else:
if not config['debug']:
main_logger = None
main_loop()

@@ -39,6 +39,7 @@ dhcp
docker
fuel-bootstrap-cli
fuel-bootstrap-image
fuel-bootstrap-image-builder
# NOTE(kozhukalov): We don't need target centos images in 8.0
# fuel-target-centos-images{{CENTOS_RELEASE}}
fuelmenu

@@ -96,6 +96,7 @@ shotgun
$(eval $(foreach pkg,$(packages_list),$(call build_rpm,$(pkg))$(NEWLINE)))
$(BUILD_DIR)/packages/rpm/repo.done: $(BUILD_DIR)/bootstrap/fuel-bootstrap-image-builder-rpm.done
$(BUILD_DIR)/packages/rpm/repo.done:
find $(BUILD_DIR)/packages/rpm/RPMS -name '*.rpm' -exec cp -u {} $(LOCAL_MIRROR_MOS_CENTOS_OS_BASEURL)/Packages \;
createrepo -g $(LOCAL_MIRROR_MOS_CENTOS)/comps.xml \
@@ -126,12 +127,19 @@ $(BUILD_DIR)/packages/rpm/fuel-docker-images.done: \
-o $(LOCAL_MIRROR_MOS_CENTOS_OS_BASEURL) $(LOCAL_MIRROR_MOS_CENTOS_OS_BASEURL)
$(ACTION.TOUCH)
$(BUILD_DIR)/packages/rpm/build.done:
# in case BUILD_PACKAGES=0 we have to build only fuel-bootstrap-image-builder
ifeq (1,$(strip $(BUILD_PACKAGES)))
$(BUILD_DIR)/packages/rpm/build.done: $(BUILD_DIR)/packages/rpm/repo.done
else
$(BUILD_DIR)/packages/rpm/build.done: $(BUILD_DIR)/bootstrap/fuel-bootstrap-image-builder-rpm.done \
$(BUILD_DIR)/mirror/centos/repo.done
find $(BUILD_DIR)/packages/rpm/RPMS -name '*.rpm' -exec cp -u {} $(LOCAL_MIRROR_MOS_CENTOS_OS_BASEURL)/Packages \;
createrepo -g $(LOCAL_MIRROR_MOS_CENTOS_OS_BASEURL)/comps.xml \
-o $(LOCAL_MIRROR_MOS_CENTOS_OS_BASEURL) $(LOCAL_MIRROR_MOS_CENTOS_OS_BASEURL)
endif
$(ACTION.TOUCH)
#######################################
# This section is for building container
# packages that depend on other packages.
@@ -148,6 +156,7 @@ fuel-bootstrap-image
$(eval $(foreach pkg,$(fuel_rpm_packages_late),$(call build_rpm,$(pkg),-late)$(NEWLINE)))
$(BUILD_DIR)/packages/rpm/repo.done: $(BUILD_DIR)/bootstrap/fuel-bootstrap-image-builder-rpm.done
# BUILD_PACKAGES=0 - for late packages we need to be sure that centos mirror is ready
# BUILD_PACKAGES=1 - for late packages we need to be sure that fuel-* packages were built beforehand
$(BUILD_DIR)/packages/rpm/repo-late.done: $(BUILD_DIR)/mirror/centos/repo.done