Merge branch 'master' into rcov_tests

Conflicts:
	.gitignore
This commit is contained in:
Vladmir Sharhsov(warpc) 2013-09-02 11:21:42 +04:00
commit e4e2b26053
71 changed files with 8131 additions and 698 deletions

3
.gitignore vendored
View File

@ -7,4 +7,5 @@ coverage.data
# Need only on local machine for gem
Gemfile
Gemfile.lock
Gemfile.lock
docs/_build

View File

@ -1,2 +1,89 @@
astute
======
Astute
======
Astute is an orchestrator which, using data about nodes and deployment settings, performs two things:
- provision
- deploy
Provision
-----
OS installation on selected nodes.
Provisioning is done using Cobbler. The Astute orchestrator collects data about nodes and creates corresponding Cobbler systems using parameters specified in the engine section of the provision data. After the systems are created, it connects to the Cobbler engine and reboots nodes according to the power management parameters of the node.
Deploy
-----
OpenStack installation in the desired configuration on the selected nodes.
Astute uses data about nodes and deployment settings and recalculates parameters needed for deployment. Calculated parameters are passed to the nodes being deployed by use of the nailyfact MCollective agent, which uploads these attributes to the `/etc/naily.facts` file of the node. Then puppet parses this file using a Facter plugin and uploads these facts into puppet. These facts are used during the catalog compilation phase by the puppet master. Finally the catalog is executed and the Astute orchestrator proceeds to the next node in the deployment sequence. Fuel Library provides puppet modules for Astute.
Using as library
-----
```ruby
require 'astute'
class ConsoleReporter
def report(msg)
puts msg.inspect
end
end
reporter = ConsoleReporter.new
deploy_engine = Astute::DeploymentEngine::NailyFact
orchestrator = Astute::Orchestrator.new(deploy_engine, log_parsing=false)
# Add systems to cobbler, reboot and start installation process.
orchestrator.fast_provision(reporter, environment['engine'], environment['nodes'])
# Observe the OS installation
orchestrator.provision(reporter, environment['task_uuid'], environment['nodes'])
# Deploy OpenStack
orchestrator.deploy(reporter, environment['task_uuid'], environment['nodes'], environment['attributes'])
```
More information about the expected content of the environment can be found here:
http://docs.mirantis.com/fuel/3.1/installation-fuel-cli.html#yaml-high-level-structure
A simple example of using Astute as a library: https://github.com/Mirantis/astute/blob/master/bin/astute
Using as CLI
-----
Provision:
astute -f simple.yaml -c provision
Deploy:
astute -f simple.yaml -c deploy
More information about the content of `simple.yaml` can be found here: http://docs.mirantis.com/fuel/3.1/installation-fuel-cli.html#yaml-high-level-structure
Additional materials
-----
- ISO, other materials: http://fuel.mirantis.com/
- User guide: http://docs.mirantis.com/
- Development documentation: http://docs.mirantis.com/fuel-dev/
License
------
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.

View File

@ -14,14 +14,15 @@
# License for the specific language governing permissions and limitations
# under the License.
if RUBY_VERSION < "1.9"
puts "Astute tested and works only on Ruby 1.9.3 but you use #{RUBY_VERSION}"
puts "If you still want to try it on older versions of ruby, try 'ruby -rubygems bin/astute'"
end
require 'optparse'
require 'yaml'
begin
require 'astute'
rescue LoadError
require 'rubygems'
require 'astute'
end
require 'astute'
require 'astute/version'
class ConsoleReporter
def report(msg)
@ -31,7 +32,7 @@ end
opts = {}
optparse = OptionParser.new do |o|
o.banner = "Usage: bin/astute -f FILENAME"
o.banner = "Usage: bin/astute -c COMMAND -f FILENAME "
o.on("-v", "--[no-]verbose", "Run verbosely") do |v|
opts[:verbose] = v
@ -41,7 +42,9 @@ optparse = OptionParser.new do |o|
opts[:filename] = f
end
o.on("-h") { puts o; exit }
o.on_tail("-h", "--help", "Show this message") { puts o; exit }
o.on_tail("--version", "Show version") { puts Astute::VERSION; exit }
o.on("-c", "--command COMMAND", [:provision, :deploy, :provision_and_deploy],
"Select operation: provision, deploy or provision_and_deploy") do |c|
@ -102,9 +105,10 @@ begin
end
res
end
end
rescue => e
puts "Error: #{e.inspect}"
result = Astute::FAIL
puts "Error: #{e.inspect}"
puts "Hint: use astute with --verbose or check log (#{Astute::LOG_PATH}) for more details" unless opts[:verbose]
Astute.logger.error e.backtrace.join("\n")
end
exit result
exit result

161
docs/Makefile Normal file
View File

@ -0,0 +1,161 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf pdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " text to make text files"
@echo " pdf to make pdf files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
-rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/fuel.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/fuel.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/fuel"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/fuel"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
pdf:
$(SPHINXBUILD) -b pdf $(ALLSPHINXOPTS) $(BUILDDIR)/pdf
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/pdf."
@echo "Run \`make' in that directory to run these through pdf" \
"(use \`make pdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."

9
docs/_static/bootstrap-responsive.css vendored Normal file

File diff suppressed because one or more lines are too long

24
docs/_static/bootstrap-sphinx.css_t vendored Normal file
View File

@ -0,0 +1,24 @@
/*
* bootstrap-sphinx.css
* ~~~~~~~~~~~~~~~~~~~~
*
* Sphinx stylesheet -- Twitter Bootstrap theme.
*/
body {
padding-top: 52px;
}
.navbar .brand {
color: #FFF;
text-shadow: #777 2px 2px 3px;
}
{%- block sidebarlogo %}
{%- if logo %}
.navbar h3 a, .navbar .brand {
background: transparent url("{{ logo }}") no-repeat 22px 3px;
padding-left: 62px;
}
{%- endif %}
{%- endblock %}

9
docs/_static/bootstrap.css vendored Normal file

File diff suppressed because one or more lines are too long

6
docs/_static/bootstrap.js vendored Normal file

File diff suppressed because one or more lines are too long

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 KiB

4
docs/_static/jquery.js vendored Normal file

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,4 @@
<li class="dropdown">
<a href="{{ pathto(master_doc) }}" class="dropdown-toggle" data-toggle="dropdown">{{ _('Site') }} <b class="caret"></b></a>
<ul class="dropdown-menu globaltoc">{{ toctree(maxdepth=1) }}</ul>
</li>

134
docs/_templates/bootstrap/layout.html vendored Normal file
View File

@ -0,0 +1,134 @@
{% extends "basic/layout.html" %}
{% set script_files = script_files + ['_static/bootstrap.js'] %}
{% set css_files = ['_static/bootstrap.css', '_static/bootstrap-sphinx.css'] + css_files %}
{# Sidebar: Rework into our Boostrap nav section. #}
{% macro navBar() %}
<div id="navbar" class="navbar navbar-fixed-top">
<div class="navbar-inner">
<div class="container-fluid">
<a class="brand" href="{{ pathto(master_doc) }}">{{ project|e }}</a>
<span class="navbar-text pull-left"><b>{{ version|e }}</b></span>
<ul class="nav">
<li class="divider-vertical"></li>
{% block sidebartoc %}
{% include "globaltoc.html" %}
{% include "localtoc.html" %}
{% endblock %}
{% block sidebarrel %}
{% include "relations.html" %}
{% endblock %}
{% block sidebarsourcelink %}
{% include "sourcelink.html" %}
{% endblock %}
</ul>
{% block sidebarsearch %}
{% include "searchbox.html" %}
{% endblock %}
</ul>
</div>
</div>
</div>
</div>
{% endmacro %}
{%- block extrahead %}
<script type="text/javascript">
(function () {
/**
* Patch TOC list.
*
* Will mutate the underlying span to have a correct ul for nav.
*
* @param $span: Span containing nested UL's to mutate.
* @param minLevel: Starting level for nested lists. (1: global, 2: local).
*/
var patchToc = function ($ul, minLevel) {
var findA;
// Find all a "internal" tags, traversing recursively.
findA = function ($elem, level) {
var level = level || 0,
$items = $elem.find("> li > a.internal, > ul, > li > ul");
// Iterate everything in order.
$items.each(function (index, item) {
var $item = $(item),
tag = item.tagName.toLowerCase(),
pad = 15 + ((level - minLevel) * 10);
if (tag === 'a' && level >= minLevel) {
// Add to existing padding.
$item.css('padding-left', pad + "px");
console.log(level, $item, 'padding-left', pad + "px");
} else if (tag === 'ul') {
// Recurse.
findA($item, level + 1);
}
});
};
console.log("HERE");
findA($ul);
};
$(document).ready(function () {
// Add styling, structure to TOC's.
$(".dropdown-menu").each(function () {
$(this).find("ul").each(function (index, item){
var $item = $(item);
$item.addClass('unstyled');
});
$(this).find("li").each(function () {
$(this).parent().append(this);
});
});
// Patch in level.
patchToc($("ul.globaltoc"), 2);
patchToc($("ul.localtoc"), 2);
// Enable dropdown.
$('.dropdown-toggle').dropdown();
});
}());
</script>
{% endblock %}
{% block header %}{{ navBar() }}{% endblock %}
{# Silence the sidebar's, relbar's #}
{% block sidebar1 %}{% endblock %}
{% block sidebar2 %}{% endblock %}
{% block relbar1 %}{% endblock %}
{% block relbar2 %}{% endblock %}
{%- block content %}
<div class="container">
{% block body %} {% endblock %}
</div>
{%- endblock %}
{%- block footer %}
<footer class="footer">
<div class="container">
<p class="pull-right"><a href="#">Back to top</a></p>
<p>
{%- if show_copyright %}
{%- if hasdoc('copyright') %}
{% trans path=pathto('copyright'), copyright=copyright|e %}&copy; <a href="{{ path }}">Copyright</a> {{ copyright }}.{% endtrans %}<br/>
{%- else %}
{% trans copyright=copyright|e %}&copy; Copyright {{ copyright }}.{% endtrans %}<br/>
{%- endif %}
{%- endif %}
{%- if last_updated %}
{% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}<br/>
{%- endif %}
{%- if show_sphinx %}
{% trans sphinx_version=sphinx_version|e %}Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> {{ sphinx_version }}.{% endtrans %}<br/>
{%- endif %}
</p>
</div>
</footer>
{%- endblock %}

View File

@ -0,0 +1,5 @@
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown">{{ _('Page') }} <b class="caret"></b></a>
<ul class="dropdown-menu localtoc">{{ toc }}</ul>
<!--<span class="localtoc">{{ toc }}</span>-->
</li>

View File

@ -0,0 +1,8 @@
{%- if prev %}
<li><a href="{{ prev.link|e }}"
title="{{ _('previous chapter') }}">{{ "&laquo;"|safe }} {{ prev.title }}</a></li>
{%- endif %}
{%- if next %}
<li><a href="{{ next.link|e }}"
title="{{ _('next chapter') }}">{{ next.title }} {{ "&raquo;"|safe }}</a></li>
{%- endif %}

View File

@ -0,0 +1,7 @@
{%- if pagename != "search" %}
<form class="navbar-search pull-right" style="margin-bottom:-3px;" action="{{ pathto('search') }}" method="get">
<input type="text" name="q" placeholder="Search" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
{%- endif %}

View File

@ -0,0 +1,4 @@
{%- if show_source and has_source and sourcename %}
<li><a href="{{ pathto('_sources/' + sourcename, true)|e }}"
rel="nofollow">{{ _('Source') }}</a></li>
{%- endif %}

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,24 @@
/*
* bootstrap-sphinx.css
* ~~~~~~~~~~~~~~~~~~~~
*
* Sphinx stylesheet -- Twitter Bootstrap theme.
*/
body {
padding-top: 52px;
}
.navbar .brand {
color: #FFF;
text-shadow: #777 2px 2px 3px;
}
{%- block sidebarlogo %}
{%- if logo %}
.navbar h3 a, .navbar .brand {
background: transparent url("{{ logo }}") no-repeat 22px 3px;
padding-left: 62px;
}
{%- endif %}
{%- endblock %}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 KiB

File diff suppressed because one or more lines are too long

5
docs/_templates/bootstrap/theme.conf vendored Normal file
View File

@ -0,0 +1,5 @@
# Twitter Bootstrap Theme
[theme]
inherit = basic
stylesheet = basic.css
pygments_style = tango

100
docs/common_conf.py Normal file
View File

@ -0,0 +1,100 @@
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions += ['sphinx.ext.inheritance_diagram', 'sphinxcontrib.blockdiag', 'sphinxcontrib.actdiag', 'sphinxcontrib.seqdiag', 'sphinxcontrib.nwdiag']
# The encoding of source files.
source_encoding = 'utf-8-sig'
#source_encoding = 'shift_jis'
# The language for content autogenerated by Sphinx.
#language = 'en'
#language = 'ja'
# The theme to use for HTML and HTML Help pages.
#html_theme = 'default'
#html_theme = 'sphinxdoc'
#html_theme = 'scrolls'
#html_theme = 'agogo'
#html_theme = 'traditional'
#html_theme = 'nature'
#html_theme = 'haiku'
# If this is not the empty string, a 'Last updated on:' timestamp
# is inserted at every page bottom, using the given strftime() format.
# Default is '%b %d, %Y' (or a locale-dependent equivalent).
html_last_updated_fmt = '%Y/%m/%d'
# Enable Antialiasing
blockdiag_antialias = True
acttdiag_antialias = True
seqdiag_antialias = True
nwdiag_antialias = True
extensions += ['rst2pdf.pdfbuilder']
pdf_documents = [
(master_doc, project, project, copyright),
]
pdf_stylesheets = ['b4', 'kerning']
#pdf_language = "en"
# Mode for literal blocks wider than the frame. Can be
# overflow, shrink or truncate
#pdf_fit_mode = "shrink"
# Section level that forces a break page.
# For example: 1 means top-level sections start in a new page
# 0 means disabled
#pdf_break_level = 0
# When a section starts in a new page, force it to be 'even', 'odd',
# or just use 'any'
pdf_breakside = 'any'
# Insert footnotes where they are defined instead of
# at the end.
pdf_inline_footnotes = True
# verbosity level. 0 1 or 2
pdf_verbosity = 0
# If false, no index is generated.
pdf_use_index = True
# If false, no modindex is generated.
pdf_use_modindex = True
# If false, no coverpage is generated.
pdf_use_coverpage = True
# Name of the cover page template to use
#pdf_cover_template = 'sphinxcover.tmpl'
# Documents to append as an appendix to all manuals.
#pdf_appendices = []
# Enable experimental feature to split table cells. Use it
# if you get "DelayedTable too big" errors
#pdf_splittables = False
# Set the default DPI for images
#pdf_default_dpi = 72
# Enable rst2pdf extension modules (default is only vectorpdf)
# you need vectorpdf if you want to use sphinx's graphviz support
#pdf_extensions = ['vectorpdf']
# Page template name for "regular" pages
#pdf_page_template = 'cutePage'
# Show Table Of Contents at the beginning?
pdf_use_toc = True
# How many levels deep should the table of contents be?
pdf_toc_depth = 2
# Add section number to section references
pdf_use_numbered_links = False
# Background images fitting mode
pdf_fit_background_mode = 'scale'
pdf_font_path = ['C:\\Windows\\Fonts\\', '/usr/share/fonts']

245
docs/conf.py Normal file
View File

@ -0,0 +1,245 @@
# -*- coding: utf-8 -*-
#
# "Fuel" documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 25 14:02:29 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc','rst2pdf.pdfbuilder']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Astute for OpenStack'
copyright = u'2013, Mirantis'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.0'
# The full version, including alpha/beta/rc tags.
release = '3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_templates"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'astutedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'astute.tex', u'Astute Documentation',
u'Mirantis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'astute', u'Astute Documentation',
[u'Mirantis'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'astute', u'Astute Documentation',
u'Mirantis', 'astute', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Additional Settings -------------------------------------------------------
execfile('./common_conf.py')

11
docs/index.rst Normal file
View File

@ -0,0 +1,11 @@
==============================
Astute for OpenStack: Documentation
==============================
Table of contents
=================
.. toctree::
:maxdepth: 2
pages/0010-attribute_list

File diff suppressed because it is too large Load Diff

354
examples/compact.yaml Normal file
View File

@ -0,0 +1,354 @@
---
nodes:
- role: compute
network_data:
- name: public
ip: 172.18.94.39
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.39
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '1'
default_gateway: 172.18.94.33
uid: '1'
mac: 64:C3:54:54:D2:66
name: compute-01
ip: 172.18.94.39
profile: centos-x86_64
fqdn: compute-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.39
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: &18648020
ks_spaces: ! '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3\",\"volumes\":
[{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200}, {\"type\":
\"mbr\"}, {\"size\": 20275, \"type\": \"pv\", \"vg\": \"os\"}],\"size\": 19232},{\"type\":
\"vg\", \"id\": \"os\", \"volumes\": [{\"mount\": \"/\", \"type\": \"lv\", \"name\":
\"root\", \"size\": 19232}, {\"mount\": \"swap\", \"type\": \"lv\", \"name\":
\"swap\", \"size\": 1024}]}, {\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4\",
\"volumes\": [{\"type\": \"mbr\"}, {\"size\": 20476, \"type\": \"pv\", \"vg\":
\"cinder-volumes\"}], \"size\": 20476}]"'
mco_enable: 1
mco_vhost: mcollective
mco_pskey: unset
mco_user: mcollective
puppet_enable: 0
install_log_2_syslog: 1
mco_password: marionette
puppet_auto_setup: 1
puppet_master: fuel.domain.tld
mco_auto_setup: 1
auth_key: ! '""'
puppet_version: 2.7.19
mco_connector: rabbitmq
mco_host: 172.18.94.34
interfaces:
eth0:
ip_address: 172.18.94.39
netmask: 255.255.255.0
dns_name: compute-01.domain.tld
static: '1'
mac_address: 64:C3:54:54:D2:66
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: &18667760
memory:
total: 778694656
interfaces:
- mac: 64:D8:E1:F6:66:43
max_speed: 100
name: eth2
ip: 10.22.0.94
netmask: 255.255.255.0
current_speed: 100
- mac: 64:C8:E2:3B:FD:6E
max_speed: 100
name: eth1
ip: 10.21.0.94
netmask: 255.255.255.0
current_speed: 100
- name: eth0
ip: 10.20.0.94
netmask: 255.255.255.0
mac: 64:43:7B:CA:56:DD
max_speed: 100
current_speed: 100
disks:
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0
name: sdc
size: 2411724800000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0
name: sdb
size: 536870912000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0
name: sda
size: 17179869184
system:
serial: '0'
version: '1.2'
fqdn: bootstrap
family: Virtual Machine
manufacturer: VirtualBox
cpu:
real: 0
total: 1
spec:
- model: Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz
frequency: 2397
error_type:
- role: primary-controller
network_data:
- name: public
ip: 172.18.94.41
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.41
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '2'
default_gateway: 172.18.94.33
uid: '2'
mac: 64:48:7A:14:83:E8
name: controller-01
ip: 172.18.94.41
profile: centos-x86_64
fqdn: controller-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.41
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.41
netmask: 255.255.255.0
dns_name: controller-01.domain.tld
static: '1'
mac_address: 64:48:7A:14:83:E8
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: controller
network_data:
- name: public
ip: 172.18.94.42
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.42
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '3'
default_gateway: 172.18.94.33
uid: '3'
mac: 64:B7:37:B1:1D:C9
name: controller-02
ip: 172.18.94.42
profile: centos-x86_64
fqdn: controller-02.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.42
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.42
netmask: 255.255.255.0
dns_name: controller-02.domain.tld
static: '1'
mac_address: 64:B7:37:B1:1D:C9
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: controller
network_data:
- name: public
ip: 172.18.94.36
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.36
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '4'
default_gateway: 172.18.94.33
uid: '4'
mac: 64:F4:64:E7:50:D3
name: controller-03
ip: 172.18.94.36
profile: centos-x86_64
fqdn: controller-03.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.36
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.36
netmask: 255.255.255.0
dns_name: controller-03.domain.tld
static: '1'
mac_address: 64:F4:64:E7:50:D3
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
attributes:
master_ip: 172.18.94.34
use_cow_images: true
libvirt_type: kvm
dns_nameservers: 172.18.94.34
verbose: true
debug: true
auto_assign_floating_ip: true
start_guests_on_host_boot: true
create_networks: true
compute_scheduler_driver: nova.scheduler.multi.MultiScheduler
quantum: true
master_hostname: controller-01
nagios: false
proj_name: test
nagios_master: fuelweb.domain.tld
management_vip: 10.107.2.254
public_vip: 172.18.94.46
novanetwork_parameters:
vlan_start: <1-1024>
network_manager: String
network_size: <Integer>
quantum_parameters:
tenant_network_type: gre
segment_range: 300:500
metadata_proxy_shared_secret: quantum
mysql:
root_password: root
glance:
db_password: glance
user_password: glance
swift:
user_password: swift_pass
nova:
db_password: nova
user_password: nova
access:
password: admin
user: admin
tenant: admin
email: admin@example.org
keystone:
db_password: keystone
admin_token: nova
quantum_access:
user_password: quantum
db_password: quantum
rabbit:
password: nova
user: nova
cinder:
password: cinder
user: cinder
floating_network_range: 172.18.94.48/28
fixed_network_range: 10.107.2.0/24
base_syslog:
syslog_port: '514'
syslog_server: 172.18.94.34
syslog:
syslog_port: '514'
syslog_transport: udp
syslog_server: ''
use_unicast_corosync: false
horizon_use_ssl: false
cinder_nodes:
- controller
ntp_servers:
- pool.ntp.org
deployment_id: 1
deployment_mode: ha
deployment_source: cli
deployment_engine: nailyfact
engine:
url: http://localhost/cobbler_api
username: cobbler
password: cobbler

154
examples/convert.rb Normal file
View File

@ -0,0 +1,154 @@
#!/usr/bin/env ruby
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Builds an Astute deployment YAML for an HA ("compact") cluster from:
#   * example_new.yaml - a single-node template (under the 'node_01' key), and
#   * full.json        - a nailgun node-inventory dump.
# The resulting document is printed to stdout.

require 'json'
require 'yaml'

# Bytes per MiB (1024 * 1024). Fixes the original constant 1048756,
# a typo that slightly inflated every computed partition size.
BYTES_PER_MIB = 1048576.0

public_iface = "eth0"
internal_iface = "eth1"
private_iface = "eth2"
default_gateway = "172.18.94.33"
master_ip = "172.18.94.34"
internal_net = '10.107.2.0'
internal_net_prefix = internal_net.split('.')[0..2].join(".")

# Node name => MAC address of its network-boot interface (as listed in full.json).
nodes = {"compute-01" => '64:c3:54:54:d2:66',
  "controller-01" => "64:48:7a:14:83:e8",
  "controller-02" => "64:b7:37:b1:1d:c9",
  "controller-03" => "64:f4:64:e7:50:d3",
  "swift-01" => "64:57:26:83:1d:ca",
  "swift-02" => "64:dc:fd:ad:eb:4e",
  "swift-03" => "64:ea:df:59:79:39",
  "swiftproxy-01" => "64:bc:c3:9c:07:26",
  "swiftproxy-02" => "64:97:93:5f:b2:dc"
}

# File.read avoids leaking the handle that a blockless File.open keeps open.
template = YAML.load(File.read('example_new.yaml'))
template_node = template['node_01']
newyaml = template
newyaml['nodes'] = []
newyaml.delete('node_01')
cluster = JSON.parse(File.read('full.json'))

nodes.each do |node, macaddr|
  # Deep copy the template: Hash#clone is shallow, so nested hashes such as
  # 'ks_meta' would be shared between every generated node, and the in-place
  # .update below would corrupt entries already pushed to newyaml['nodes'].
  result = Marshal.load(Marshal.dump(template_node))
  json_node = cluster.find { |n| n['mac'].to_s == macaddr.to_s.upcase }
  mac = json_node['mac'].to_s
  ip = json_node['ip'].to_s
  l_octet = ip.split('.')[3]
  id = json_node['id'].to_s
  uid = id
  # The first node of each group (hash order) becomes that group's "primary".
  if node == nodes.keys.grep(/controller/).first
    role = 'primary-controller'
  elsif node =~ /controller/
    role = 'controller'
  elsif node == nodes.keys.grep(/swiftproxy/).first
    role = 'primary-swift-proxy'
  elsif node =~ /swiftproxy/
    role = 'swift-proxy'
  elsif node =~ /swift-\d+/
    role = 'storage'
  else
    role = 'compute'
  end
  cobbler_dnsname = "#{node}.domain.tld"
  cobbler_interfaces = {
    public_iface => {"ip_address"=>ip, "netmask"=> "255.255.255.0", "dns_name"=>cobbler_dnsname, "static"=> "1", "mac_address" => mac}
  }
  cobbler_interfaces_extra = {
    public_iface => {'onboot'=>'yes','peerdns'=>'no'},
    internal_iface => {'onboot'=>'no','peerdns'=>'no'},
    private_iface => {'onboot'=>'no','peerdns'=>'no'}
  }
  result['interfaces'] = cobbler_interfaces
  result['interfaces_extra'] = cobbler_interfaces_extra
  result['power_address'] = ip
  result['mac'] = mac
  result['default_gateway'] = default_gateway
  result['name'] = node
  result['ip'] = ip
  result['id'] = id
  result['uid'] = uid
  result['name_servers'] = master_ip
  result['role'] = role
  result['fqdn'] = cobbler_dnsname
  # vda carries the operating system; vdb is dedicated to cinder volumes.
  system_disk = json_node['meta']['disks'].find { |disk| disk['name'] == 'vda' }
  cinder_disk = json_node['meta']['disks'].find { |disk| disk['name'] == 'vdb' }
  system_disk_path = system_disk['disk']
  system_disk_size = (system_disk['size'] / BYTES_PER_MIB).floor
  cinder_disk_path = cinder_disk['disk']
  cinder_disk_size = (cinder_disk['size'] / BYTES_PER_MIB).floor
  # Partition layout (sizes in MiB): 200 MiB /boot plus 1 MiB of slack, the
  # rest becomes an LVM PV; root gets whatever is left after swap, rounded
  # down to whole 32 MiB extents.
  system_pv_size = system_disk_size - 201
  swap_size = 1024
  free_vg_size = system_pv_size - swap_size
  free_extents = (free_vg_size / 32.0).floor
  system_disk_size = 32 * free_extents
  # ks_spaces is a JSON document embedded (escaped) inside the YAML value;
  # see the commented sample in example_new.yaml for the expanded form.
  ks_spaces = '"[{\"type\": \"disk\", \"id\": \"' +
    system_disk_path.to_s +
    '\",\"volumes\": [{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200}, {\"type\": \"mbr\"}, {\"size\": ' +
    system_pv_size.to_s +
    ', \"type\": \"pv\", \"vg\": \"os\"}],\"size\": ' +
    system_disk_size.to_s +
    '},{\"type\": \"vg\", \"id\": \"os\", \"volumes\": [{\"mount\": \"/\", \"type\": \"lv\", \"name\": \"root\", \"size\": ' +
    system_disk_size.to_s +
    '}, {\"mount\": \"swap\", \"type\": \"lv\", \"name\": \"swap\", \"size\": ' +
    swap_size.to_s +
    '}]}, {\"type\": \"disk\", \"id\": \"' + cinder_disk_path + '\", \"volumes\": [{\"type\": \"mbr\"}, {\"size\": ' +
    cinder_disk_size.to_s +
    ', \"type\": \"pv\", \"vg\": \"cinder-volumes\"}], \"size\": ' +
    cinder_disk_size.to_s + '}]"'
  cobbler_ks_meta = {"ks_spaces" => ks_spaces, "mco_host" => master_ip}
  result['ks_meta'] = result['ks_meta'].update(cobbler_ks_meta)
  puppet_network_data = [
    {"name" => 'public', 'ip'=>ip, "dev" => public_iface, 'netmask' => "255.255.255.0", "gateway" => default_gateway },
    {"name" => ['management','storage'], 'ip'=>"#{internal_net_prefix.to_s}.#{l_octet}", "dev" => internal_iface, 'netmask' => "255.255.255.0"},
    {"name" => 'fixed', "dev" => private_iface},
  ]
  result['network_data'] = puppet_network_data
  newyaml['nodes'].push(result)
end

# Cluster-wide attribute overrides for this example environment.
newyaml['attributes']['master_ip'] = master_ip
newyaml['attributes']['dns_nameservers'] = master_ip
newyaml['attributes']['libvirt_type'] = 'kvm'
newyaml['attributes']['public_vip'] = '172.18.94.46'
newyaml['attributes']['management_vip'] = '10.107.2.254'
newyaml['attributes']['floating_network_range'] = '172.18.94.48/28'
newyaml['attributes']['fixed_network_range'] = '10.107.2.0/24'
newyaml['attributes']['base_syslog']['syslog_server'] = master_ip
puts newyaml.to_yaml

146
examples/convert_simple.rb Normal file
View File

@ -0,0 +1,146 @@
#!/usr/bin/env ruby
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Builds an Astute deployment YAML for a simple two-node cluster from:
#   * example_new.yaml - a single-node template (under the 'node_01' key), and
#   * simple.json      - a nailgun node-inventory dump.
# The resulting document is printed to stdout.

require 'json'
require 'yaml'

# Bytes per MiB (1024 * 1024). Fixes the original constant 1048756,
# a typo that slightly inflated every computed partition size.
BYTES_PER_MIB = 1048576.0

# Node name => MAC address of its network-boot interface (as listed in simple.json).
nodes = {
  "compute-01" => '64:7D:B8:84:64:79',
  "controller-01" => "64:43:7B:CA:56:DD",
}

public_iface = "eth0"
internal_iface = "eth1"
private_iface = "eth2"
default_gateway = "10.20.0.1"
master_ip = "10.20.0.2"
internal_net = '10.20.1.0'
internal_net_prefix = internal_net.split('.')[0..2].join(".")

# File.read avoids leaking the handle that a blockless File.open keeps open.
template = YAML.load(File.read('example_new.yaml'))
template_node = template['node_01']
newyaml = template
newyaml['nodes'] = []
newyaml.delete('node_01')
cluster = JSON.parse(File.read('simple.json'))

nodes.each do |node, macaddr|
  # Deep copy the template: Hash#clone is shallow, so nested hashes such as
  # 'ks_meta' would be shared between every generated node, and the in-place
  # .update below would corrupt entries already pushed to newyaml['nodes'].
  result = Marshal.load(Marshal.dump(template_node))
  json_node = cluster.find { |n| n['mac'].to_s == macaddr.to_s.upcase }
  mac = json_node['mac'].to_s
  ip = json_node['ip'].to_s
  l_octet = ip.split('.')[3]
  id = json_node['id'].to_s
  uid = id
  # The first node of each group (hash order) becomes that group's "primary".
  if node == nodes.keys.grep(/controller/).first
    role = 'primary-controller'
  elsif node =~ /controller/
    role = 'controller'
  elsif node == nodes.keys.grep(/swiftproxy/).first
    role = 'primary-swift-proxy'
  elsif node =~ /swiftproxy/
    role = 'swift-proxy'
  elsif node =~ /swift-\d+/
    role = 'storage'
  else
    role = 'compute'
  end
  cobbler_dnsname = "#{node}.domain.tld"
  cobbler_interfaces = {
    public_iface => {"ip_address"=>ip, "netmask"=> "255.255.255.0", "dns_name"=>cobbler_dnsname, "static"=> "1", "mac_address" => mac}
  }
  cobbler_interfaces_extra = {
    public_iface => {'onboot'=>'yes','peerdns'=>'no'},
    internal_iface => {'onboot'=>'no','peerdns'=>'no'},
    private_iface => {'onboot'=>'no','peerdns'=>'no'}
  }
  result['interfaces'] = cobbler_interfaces
  result['interfaces_extra'] = cobbler_interfaces_extra
  result['power_address'] = ip
  result['mac'] = mac
  result['default_gateway'] = default_gateway
  result['name'] = node
  result['id'] = id
  result['uid'] = uid
  result['ip'] = ip
  result['name_servers'] = master_ip
  result['role'] = role
  result['fqdn'] = cobbler_dnsname
  # vda carries the operating system; vdb is dedicated to cinder volumes.
  system_disk = json_node['meta']['disks'].find { |disk| disk['name'] == 'vda' }
  cinder_disk = json_node['meta']['disks'].find { |disk| disk['name'] == 'vdb' }
  system_disk_path = system_disk['disk']
  system_disk_size = (system_disk['size'] / BYTES_PER_MIB).floor
  cinder_disk_path = cinder_disk['disk']
  cinder_disk_size = (cinder_disk['size'] / BYTES_PER_MIB).floor
  # Partition layout (sizes in MiB): 200 MiB /boot plus 1 MiB of slack, the
  # rest becomes an LVM PV; root gets whatever is left after swap, rounded
  # down to whole 32 MiB extents.
  system_pv_size = system_disk_size - 201
  swap_size = 1024
  free_vg_size = system_pv_size - swap_size
  free_extents = (free_vg_size / 32.0).floor
  system_disk_size = 32 * free_extents
  # ks_spaces is a JSON document embedded (escaped) inside the YAML value;
  # see the commented sample in example_new.yaml for the expanded form.
  ks_spaces = '"[{\"type\": \"disk\", \"id\": \"' +
    system_disk_path.to_s +
    '\",\"volumes\": [{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200}, {\"type\": \"mbr\"}, {\"size\": ' +
    system_pv_size.to_s +
    ', \"type\": \"pv\", \"vg\": \"os\"}],\"size\": ' +
    system_disk_size.to_s +
    '},{\"type\": \"vg\", \"id\": \"os\", \"volumes\": [{\"mount\": \"/\", \"type\": \"lv\", \"name\": \"root\", \"size\": ' +
    system_disk_size.to_s +
    '}, {\"mount\": \"swap\", \"type\": \"lv\", \"name\": \"swap\", \"size\": ' +
    swap_size.to_s +
    '}]}, {\"type\": \"disk\", \"id\": \"' + cinder_disk_path + '\", \"volumes\": [{\"type\": \"mbr\"}, {\"size\": ' +
    cinder_disk_size.to_s +
    ', \"type\": \"pv\", \"vg\": \"cinder-volumes\"}], \"size\": ' +
    cinder_disk_size.to_s + '}]"'
  cobbler_ks_meta = {"ks_spaces" => ks_spaces, "mco_host" => master_ip}
  result['ks_meta'] = result['ks_meta'].update(cobbler_ks_meta)
  puppet_network_data = [
    {"name" => 'public', 'ip'=>ip, "dev" => public_iface, 'netmask' => "255.255.255.0", "gateway" => default_gateway },
    {"name" => ['management','storage'], 'ip'=>"#{internal_net_prefix.to_s}.#{l_octet}", "dev" => internal_iface, 'netmask' => "255.255.255.0"},
    {"name" => 'fixed', "dev" => private_iface},
  ]
  result['network_data'] = puppet_network_data
  newyaml['nodes'].push(result)
end

# Cluster-wide attribute overrides for this example environment.
newyaml['attributes']['master_ip'] = master_ip
newyaml['attributes']['dns_nameservers'] = master_ip
newyaml['attributes']['libvirt_type'] = 'kvm'
newyaml['attributes']['floating_network_range'] = '10.20.0.150/28'
newyaml['attributes']['fixed_network_range'] = '10.20.1.0/24'
newyaml['attributes']['base_syslog']['syslog_server'] = master_ip
puts newyaml.to_yaml

440
examples/example_new.yaml Normal file
View File

@ -0,0 +1,440 @@
#Simple node declaration. Includes YAML reference referred in `nodes` section
node_01: &node_01
# == role
# Specifies role of the node
# [primary-controller|controller|storage|swift-proxy|primary-swift-proxy]
# Default: unspecified
role: primary-controller
# == network_data
# Array of network interfaces hashes
# === name: scalar or array of one or more of [management|fixed|public|storage|admin(**deprecated)|floating(**deprecated)]
# === ip: IP address to be configured by puppet on this interface
# === dev: interface device name
# === netmask: network mask for the interface
# === vlan: vlan ID for the interface
# === gateway: IP address of gateway (**not used**)
network_data:
- name: public
ip: 10.20.0.94
dev: eth0
netmask: 255.255.255.0
gateway: 10.20.0.1
- name:
- management
- storage
ip: 10.20.1.94
netmask: 255.255.255.0
dev: eth1
- name: fixed
dev: eth2
# == public_br
# Name of the public bridge for Quantum-enabled configuration
public_br: br-ex
# == internal_br
# Name of the internal bridge for Quantum-enabled configuration
internal_br: br-mgmt
# == id ** TO BE DOCUMENTED. Suspected: node id in mcollective server.cfg.
id: 1
# == default_gateway
# Default gateway for the node
default_gateway: 10.20.0.1
# == uid ** TO BE DOCUMENTED
uid: 1
# == mac
# MAC address of the interface being used for network boot.
mac: 64:43:7B:CA:56:DD
# == name
# name of the system in cobbler
name: controller-01
# == ip
# IP issued by cobbler DHCP server to this node during network boot.
ip: 10.20.0.94
# == profile
# Cobbler profile for the node.
# Default: centos-x86_64
# [centos-x86_64|rhel-x86_64]
# CAUTION:
# rhel-x86_64 is created only after rpmcache class is run on master node
profile: centos-x86_64
# == fqdn
# Fully-qualified domain name of the node
fqdn: controller-01.domain.tld
# == power_type
# Cobbler power-type. Consult cobbler documentation for available options.
# Default: ssh
power_type: ssh
# == power_user
# Username for cobbler to manage power of this machine
# Default: unset
power_user: root
# == power_pass
# Password/credentials for cobbler to manage power of this machine
# Default: unset
power_pass: /root/.ssh/bootstrap.rsa
# == power_address
# IP address of the device managing the node power state.
# Default: unset
power_address: 10.20.0.94
# == netboot_enabled
# Disable/enable netboot for this node.
netboot_enabled: '1'
# == name_servers
# DNS name servers for this node during provisioning phase.
name_servers: ! '"10.20.0.2"'
# == puppet_master
# Hostname or IP address of puppet master node
puppet_master: fuel.domain.tld
# == ks_meta
# Kickstart metadata used during provisioning
ks_meta:
# == ks_spaces
# Kickstart data for disk partitioning
# The simplest way to calculate is to use REST call to nailgun api,
# recalculate disk size into MiB and dump the following config. Workflow is as follows:
# GET request to http://<fuel-master-node>:8000/api/nodes
# Parse JSON and derive disk data from meta['disks']. Set explicitly which disk is system and which is for cinder.
# $system_disk_size=floor($system_disk_meta['disks']['size']/1048576)
# $system_disk_path=$system_disk_meta['disks']['disk']
# $cinder_disk_size=floor($cinder_disk_meta['disks']['size']/1048576)
#
# $cinder_disk_path=$cinder_disk_meta['disks']['disk']
#
# All further calculations are made in MiB
# Calculation of system partitions
#
# For each node:
# calculate size of physical volume for operating system:
# $pv_size = $system_disk_size - 200 - 1
# declare $swap_size
# calculate size of root partition:
# $free_vg_size = $pv_size - $swap_size
# $free_extents = floor($free_vg_size/32)
# $system_disk_size = 32 * $free_extents
# ks_spaces: '"[
#{\"type\": \"disk\", \"id\": \"$system_disk_path\",
#\"volumes\":
#[
# {\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200},
# {\"type\": \"mbr\"},
# {\"size\": $pv_size, \"type\": \"pv\", \"vg\": \"os\"}
#],
#\"size\": $system_disk_size
#},
#{\"type\": \"vg\", \"id\": \"os\", \"volumes\":
#[
# {\"mount\": \"/\", \"type\": \"lv\", \"name\": \"root\", \"size\": $system_disk_size },
# {\"mount\": \"swap\", \"type\": \"lv\", \"name\": \"swap\", \"size\": $swap_size}
#]
#},
#{\"type\": \"disk\", \"id\": \"$path_to_cinder_disk\",
#\"volumes\":
#[
# {\"type\": \"mbr\"},
# {\"size\": $cinder_disk_size, \"type\": \"pv\", \"vg\": \"cinder-volumes\"}
#],
#\"size\": $cinder_disk_size
#}
#]"'
ks_spaces: '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3\",
\"volumes\": [{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200},
{\"type\": \"mbr\"}, {\"size\": 20000, \"type\": \"pv\", \"vg\": \"os\"}],
\"size\": 20480}, {\"type\": \"vg\", \"id\": \"os\", \"volumes\": [{\"mount\":
\"/\", \"type\": \"lv\", \"name\": \"root\", \"size\": 10240 }, {\"mount\":
\"swap\", \"type\": \"lv\", \"name\": \"swap\", \"size\": 2048}]}]"'
# == mco_enable
# If mcollective should be installed and enabled on the node
mco_enable: 1
# == mco_vhost
# Mcollective AMQP virtual host
mco_vhost: mcollective
# == mco_pskey
# **NOT USED**
mco_pskey: unset
# == mco_user
# Mcollective AMQP user
mco_user: mcollective
# == puppet_enable
# should puppet agent start on boot
# Default: 0
puppet_enable: 0
# == install_log_2_syslog
# Enable/disable on boot remote logging
# Default: 1
install_log_2_syslog: 1
# == mco_password
# Mcollective AMQP password
mco_password: marionette
# == puppet_auto_setup
# Whether to install puppet during provisioning
# Default: 1
puppet_auto_setup: 1
# == puppet_master
# hostname or IP of puppet master server
puppet_master: fuel.domain.tld
# == puppet_auto_setup
# Whether to install mcollective during provisioning
# Default: 1
mco_auto_setup: 1
# == auth_key
# Public RSA key to be added to cobbler authorized keys
auth_key: ! '""'
# == puppet_version
# Which puppet version to install on the node
puppet_version: 2.7.19
# == mco_connector
# Mcollective AMQP driver.
# Default: rabbitmq
mco_connector: rabbitmq
# == mco_host
# AMQP host to which Mcollective agent should connect
mco_host: 10.20.0.2
# == interfaces
# Hash of interfaces configured during provision state
interfaces:
eth0:
ip_address: 10.20.0.94
netmask: 255.255.255.0
dns_name: controller-01.domain.tld
static: '1'
mac_address: 64:43:7B:CA:56:DD
# == interfaces_extra
# extra interfaces information
interfaces_extra:
eth2:
onboot: 'no'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth0:
onboot: 'yes'
peerdns: 'no'
# == meta
# Outdated stuff needed for log parsing during astute jobs.
meta:
memory:
total: 778694656
interfaces:
- mac: 64:D8:E1:F6:66:43
max_speed: 100
name: eth2
ip: 10.22.0.94
netmask: 255.255.255.0
current_speed: 100
- mac: 64:C8:E2:3B:FD:6E
max_speed: 100
name: eth1
ip: 10.21.0.94
netmask: 255.255.255.0
current_speed: 100
- name: eth0
ip: 10.20.0.94
netmask: 255.255.255.0
mac: 64:43:7B:CA:56:DD
max_speed: 100
current_speed: 100
disks:
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0
name: sdc
size: 2411724800000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0
name: sdb
size: 536870912000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0
name: sda
size: 17179869184
system:
serial: '0'
version: '1.2'
fqdn: bootstrap
family: Virtual Machine
manufacturer: VirtualBox
cpu:
real: 0
total: 1
spec:
- model: Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz
frequency: 2397
error_type:
#Nodes array. Includes references to corresponding nodes' sections.
nodes:
- <<: *node_01
#Openstack cluster attributes used during deployment.
attributes:
# == master_ip
# IP of puppet master.
master_ip: 10.20.0.2
# == use_cow_images:
# Whether to use cow images
use_cow_images: true
# == libvirt_type
# Nova libvirt hypervisor type
# Values: qemu|kvm
# Default: kvm
libvirt_type: qemu
# == dns_nameservers
# array of DNS servers configured during deployment phase.
dns_nameservers:
- 10.20.0.1
# This parameter specifies the verbosity level of log messages
# in openstack components config.
# Debug would have set DEBUG level and ignore verbose settings, if any.
# Verbose would have set INFO level messages
# In case of non debug and non verbose - WARNING, default level would have set.
# Note: if syslog on, this default level may be configured (for syslog) with syslog_log_level option.
# == verbose
# whether to enable verbosity
# Default: true
verbose: true
# == debug
# whether to enable debug
# Default: false
debug: true
# == auto_assign_floating_ip
# Whether to assign floating IPs automatically
auto_assign_floating_ip: true
# == start_guests_on_host_boot
# Default: true
start_guests_on_host_boot: true
# == create_networks
# whether to create fixed or floating networks
create_networks: true
# == compute_scheduler_driver
# Nova scheduler driver class
compute_scheduler_driver: nova.scheduler.multi.MultiScheduler
# == quantum
# Whether quantum is enabled
# Default: true
quantum: true
# == master_hostname
# Which controller node to treat as the master node. Used only to pick the master with certainty during deployment.
master_hostname: controller-01
# == nagios
# Whether to enable nagios clients on the nodes
nagios: false
# == proj_name
# name of nagios project
proj_name: test
# == nagios_master
# nagios master server name
nagios_master: fuelweb.domain.tld
# == management_vip
# Virtual IP address for internal services (MySQL, AMQP, internal OpenStack endpoints)
management_vip: 10.20.1.200
# == public_vip
# Virtual IP address for public services (Horizon, public OpenStack endpoints)
public_vip: 10.20.0.200
#Nova-network part, gets ignored if $quantum = `false`
novanetwork_parameters:
vlan_start: <1-1024>
# == network_manager
# Which nova-network manager to use
network_manager: String
# == network_size
# which network size to use during fixed network range segmentation
network_size: <Integer>
#Quantum part, used only if quantum='true'
quantum_parameters:
# == tenant_network_type
# Which type of network segmentation to use.
# Values: gre|vlan
tenant_network_type: gre
# == segment_range
# Range of IDs for network segmentation. Consult Quantum documentation.
# Values: gre|vlan
segment_range: ! '300:500'
# == metadata_proxy_shared_secret
# Shared secret for metadata proxy services
# Values: gre|vlan
metadata_proxy_shared_secret: quantum
# Below go credentials and access parameters for main OpenStack components
mysql:
root_password: root
glance:
db_password: glance
user_password: glance
swift:
user_password: swift_pass
nova:
db_password: nova
user_password: nova
access:
password: admin
user: admin
tenant: admin
email: admin@example.org
keystone:
db_password: keystone
admin_token: nova
quantum_access:
user_password: quantum
db_password: quantum
rabbit:
password: nova
user: nova
cinder:
password: cinder
user: cinder
# == floating_network_range
# CIDR (for quantum == true) or array of IPs (for quantum == false)
# Used for creation of floating networks/IPs during deployment
floating_network_range: 10.20.0.150/26
# == fixed_network_range
# CIDR for fixed network created during deployment.
fixed_network_range: 10.20.2.0/24
# == base_syslog
# Main syslog server configuration.
base_syslog:
syslog_port: '514'
syslog_server: 10.20.0.2
# == syslog
# Additional syslog servers configuration.
syslog:
syslog_port: '514'
syslog_transport: udp
syslog_server: ''
# == use_unicast_corosync
# which communication protocol to use for corosync
use_unicast_corosync: false
# == horizon_use_ssl
# Dashboard(horizon) https/ssl mode
# false: normal mode with no encryption
# 'default': uses keys supplied with the ssl module package
# 'exist': assumes that the keys (domain name based certificate) are provisioned in advance
# 'custom': require fileserver static mount point [ssl_certs] and hostname based certificate existence
horizon_use_ssl: false
# == cinder_nodes
# Which nodes to use as cinder-volume backends
# Array of values 'all'|<hostname>|<internal IP address of node>|'controller'|<node_role>
cinder_nodes:
- controller
# == ntp_servers
# List of ntp servers
ntp_servers:
- pool.ntp.org
# == deployment_id
# Id of deployment, used to differentiate environments
deployment_id: 1
# == deployment_mode
# [ha|ha_full|multinode|single|ha_minimal]
deployment_mode: ha
# == deployment_source
# [web|cli]
deployment_source: cli
# == deployment_engine
# [simplepuppet(**deprecated**)|nailyfact]
# Default: nailyfact
deployment_engine: nailyfact
#Cobbler engine parameters
engine:
url: http://localhost/cobbler_api
username: cobbler
password: cobbler

199
examples/example_new1.yaml Normal file
View File

@ -0,0 +1,199 @@
##Network section of node configuration
node_01: &node_01
role: primary-controller
network_data:
- name: public
ip: 10.20.0.94
dev: eth0
netmask: 255.255.255.0
gateway: 10.20.0.1
- name:
- management
- storage
ip: 10.20.1.94
dev: eth1
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: 01
default_gateway: 10.20.0.1
uid: 01
mac: 64:43:7B:CA:56:DD
name: controller-01
ip: 10.20.0.94
profile: centos-x86_64
fqdn: controller-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 10.20.0.94
netboot_enabled: '1'
name_servers: ! '"10.20.0.2"'
puppet_master: fuel.domain.tld
ks_meta:
ks_spaces: '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3\",
\"volumes\": [{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200},
{\"type\": \"mbr\"}, {\"size\": 20000, \"type\": \"pv\", \"vg\": \"os\"}],
\"size\": 20480}, {\"type\": \"vg\", \"id\": \"os\", \"volumes\": [{\"mount\":
\"/\", \"type\": \"lv\", \"name\": \"root\", \"size\": 10240 }, {\"mount\":
\"swap\", \"type\": \"lv\", \"name\": \"swap\", \"size\": 2048}]}]"'
mco_enable: 1
mco_vhost: mcollective
mco_pskey: unset
mco_user: mcollective
puppet_enable: 0
install_log_2_syslog: 1
mco_password: marionette
puppet_auto_setup: 1
puppet_master: fuel.domain.tld
mco_auto_setup: 1
auth_key: ! '""'
puppet_version: 2.7.19
mco_connector: rabbitmq
mco_host: 10.20.0.2
interfaces:
eth0:
ip_address: 10.20.0.94
netmask: 255.255.255.0
dns_name: controller-01.domain.tld
static: '1'
mac_address: 64:43:7B:CA:56:DD
interfaces_extra:
eth2:
onboot: 'no'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth0:
onboot: 'yes'
peerdns: 'no'
meta:
memory:
total: 778694656
interfaces:
- mac: 64:D8:E1:F6:66:43
max_speed: 100
name: eth2
ip: 10.22.0.94
netmask: 255.255.255.0
current_speed: 100
- mac: 64:C8:E2:3B:FD:6E
max_speed: 100
name: eth1
ip: 10.21.0.94
netmask: 255.255.255.0
current_speed: 100
- name: eth0
ip: 10.20.0.94
netmask: 255.255.255.0
mac: 64:43:7B:CA:56:DD
max_speed: 100
current_speed: 100
disks:
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0
name: sdc
size: 2411724800000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0
name: sdb
size: 536870912000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0
name: sda
size: 17179869184
system:
serial: '0'
version: '1.2'
fqdn: bootstrap
family: Virtual Machine
manufacturer: VirtualBox
cpu:
real: 0
total: 1
spec:
- model: Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz
frequency: 2397
error_type:
nodes:
- <<: *node_01
attributes:
use_cow_images: true
libvirt_type: qemu
dns_nameservers:
- 10.20.0.1
verbose: true|false
debug: true|false
auto_assign_floating_ip: true
start_guests_on_host_boot: true
create_networks: true
compute_scheduler_driver: nova.scheduler.multi.MultiScheduler
quantum: true
master_hostname: controller-01
nagios: false
proj_name: test
nagios_master: fuelweb.domain.tld
management_vip: 10.20.1.200
public_vip: 10.20.0.200
#Nova-network part, used only if $quantum = `false`
novanetwork_parameters:
fixed_network_range: CIDR
vlan_start: <1-1024>
network_manager: String
network_size: <Integer>
#Quantum part, used only if quantum='true'
quantum_parameters:
tenant_network_type: gre
segment_range: ! '300:500'
metadata_proxy_shared_secret: quantum
mysql:
root_password: root
glance:
db_password: glance
user_password: glance
swift:
user_password: swift_pass
nova:
db_password: nova
user_password: nova
access:
password: admin
user: admin
tenant: admin
email: admin@example.org
keystone:
db_password: keystone
admin_token: nova
quantum_access:
user_password: quantum
db_password: quantum
rabbit:
password: nova
user: nova
cinder:
password: cinder
user: cinder
floating_network_range:
- 10.20.0.100
base_syslog:
syslog_port: '514'
syslog_server: 10.20.0.2
syslog:
syslog_port: '514'
syslog_transport: udp
syslog_server: ''
deployment_id: 1
deployment_mode: ha
deployment_source: cli
deployment_engine: nailyfact
engine:
url: http://localhost/cobbler_api
username: cobbler
password: cobbler

562
examples/example_new2.yaml Normal file
View File

@ -0,0 +1,562 @@
node_01: &node_01
role: primary-controller
network_data:
- name: public
ip: 10.20.0.94
dev: eth0
netmask: 255.255.255.0
gateway: 10.20.0.1
- name:
- management
- storage
ip: 10.20.1.94
netmask: 255.255.255.0
dev: eth1
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: 1
default_gateway: 10.20.0.1
uid: 1
mac: 64:43:7B:CA:56:DD
name: controller-01
ip: 10.20.0.94
profile: centos-x86_64
fqdn: controller-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 10.20.0.94
netboot_enabled: '1'
name_servers: ! '"10.20.0.2"'
puppet_master: fuel.domain.tld
ks_meta:
ks_spaces: '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3\",
\"volumes\": [{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200},
{\"type\": \"mbr\"}, {\"size\": 20000, \"type\": \"pv\", \"vg\": \"os\"}],
\"size\": 20480}, {\"type\": \"vg\", \"id\": \"os\", \"volumes\": [{\"mount\":
\"/\", \"type\": \"lv\", \"name\": \"root\", \"size\": 10240 }, {\"mount\":
\"swap\", \"type\": \"lv\", \"name\": \"swap\", \"size\": 2048}]}]"'
mco_enable: 1
mco_vhost: mcollective
mco_pskey: unset
mco_user: mcollective
puppet_enable: 0
install_log_2_syslog: 1
mco_password: marionette
puppet_auto_setup: 1
puppet_master: fuel.domain.tld
mco_auto_setup: 1
auth_key: ! '""'
puppet_version: 2.7.19
mco_connector: rabbitmq
mco_host: 10.20.0.2
interfaces:
eth0:
ip_address: 10.20.0.94
netmask: 255.255.255.0
dns_name: controller-01.domain.tld
static: '1'
mac_address: 64:43:7B:CA:56:DD
interfaces_extra:
eth2:
onboot: 'no'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth0:
onboot: 'yes'
peerdns: 'no'
meta:
memory:
total: 778694656
interfaces:
- mac: 64:D8:E1:F6:66:43
max_speed: 100
name: eth2
ip: 10.22.0.94
netmask: 255.255.255.0
current_speed: 100
- mac: 64:C8:E2:3B:FD:6E
max_speed: 100
name: eth1
ip: 10.21.0.94
netmask: 255.255.255.0
current_speed: 100
- name: eth0
ip: 10.20.0.94
netmask: 255.255.255.0
mac: 64:43:7B:CA:56:DD
max_speed: 100
current_speed: 100
disks:
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0
name: sdc
size: 2411724800000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0
name: sdb
size: 536870912000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0
name: sda
size: 17179869184
system:
serial: '0'
version: '1.2'
fqdn: bootstrap
family: Virtual Machine
manufacturer: VirtualBox
cpu:
real: 0
total: 1
spec:
- model: Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz
frequency: 2397
error_type:
node_02: &node_02
role: controller
network_data:
- name: public
ip: 10.20.0.98
dev: eth0
netmask: 255.255.255.0
gateway: 10.20.0.1
- name:
- management
- storage
ip: 10.20.1.98
netmask: 255.255.255.0
dev: eth1
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: 2
default_gateway: 10.20.0.1
uid: 2
mac: 64:C5:50:9D:A7:21
name: controller-02
ip: 10.20.0.98
profile: centos-x86_64
fqdn: controller-02.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 10.20.0.98
netboot_enabled: '1'
name_servers: ! '"10.20.0.2"'
puppet_master: fuel.domain.tld
ks_meta:
ks_spaces: '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3\",
\"volumes\": [{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200},
{\"type\": \"mbr\"}, {\"size\": 20000, \"type\": \"pv\", \"vg\": \"os\"}],
\"size\": 20480}, {\"type\": \"vg\", \"id\": \"os\", \"volumes\": [{\"mount\":
\"/\", \"type\": \"lv\", \"name\": \"root\", \"size\": 10240 }, {\"mount\":
\"swap\", \"type\": \"lv\", \"name\": \"swap\", \"size\": 2048}]}]"'
mco_enable: 1
mco_vhost: mcollective
mco_pskey: unset
mco_user: mcollective
puppet_enable: 0
install_log_2_syslog: 1
mco_password: marionette
puppet_auto_setup: 1
puppet_master: fuel.domain.tld
mco_auto_setup: 1
auth_key: ! '""'
puppet_version: 2.7.19
mco_connector: rabbitmq
mco_host: 10.20.0.2
interfaces:
eth0:
ip_address: 10.20.0.98
netmask: 255.255.255.0
dns_name: controller-02.domain.tld
static: '1'
mac_address: 64:C5:50:9D:A7:21
interfaces_extra:
eth2:
onboot: 'no'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth0:
onboot: 'yes'
peerdns: 'no'
meta:
memory:
total: 778694656
interfaces:
- mac: 64:0A:A3:57:63:D5
max_speed: 100
name: eth2
ip: 10.22.0.98
netmask: 255.255.255.0
current_speed: 100
- mac: 64:B7:0B:14:7C:36
max_speed: 100
name: eth1
ip: 10.21.0.98
netmask: 255.255.255.0
current_speed: 100
- name: eth0
ip: 10.20.0.98
netmask: 255.255.255.0
mac: 64:C5:50:9D:A7:21
max_speed: 100
current_speed: 100
disks:
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0
name: sdc
size: 2411724800000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0
name: sdb
size: 536870912000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0
name: sda
size: 17179869184
system:
serial: '0'
version: '1.2'
fqdn: bootstrap
family: Virtual Machine
manufacturer: VirtualBox
cpu:
real: 0
total: 1
spec:
- model: Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz
frequency: 2397
error_type:
node_03: &node_03
role: controller
network_data:
- name: public
ip: 10.20.0.76
dev: eth0
netmask: 255.255.255.0
gateway: 10.20.0.1
- name:
- management
- storage
ip: 10.20.1.76
netmask: 255.255.255.0
dev: eth1
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: 4
default_gateway: 10.20.0.1
uid: 4
mac: 64:15:B1:0C:BB:8B
name: controller-03
ip: 10.20.0.76
profile: centos-x86_64
fqdn: controller-03.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 10.20.0.76
netboot_enabled: '1'
name_servers: ! '"10.20.0.2"'
puppet_master: fuel.domain.tld
ks_meta:
ks_spaces: '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3\",
\"volumes\": [{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200},
{\"type\": \"mbr\"}, {\"size\": 20000, \"type\": \"pv\", \"vg\": \"os\"}],
\"size\": 20480}, {\"type\": \"vg\", \"id\": \"os\", \"volumes\": [{\"mount\":
\"/\", \"type\": \"lv\", \"name\": \"root\", \"size\": 10240 }, {\"mount\":
\"swap\", \"type\": \"lv\", \"name\": \"swap\", \"size\": 2048}]}]"'
mco_enable: 1
mco_vhost: mcollective
mco_pskey: unset
mco_user: mcollective
puppet_enable: 0
install_log_2_syslog: 1
mco_password: marionette
puppet_auto_setup: 1
puppet_master: fuel.domain.tld
mco_auto_setup: 1
auth_key: ! '""'
puppet_version: 2.7.19
mco_connector: rabbitmq
mco_host: 10.20.0.2
interfaces:
eth0:
ip_address: 10.20.0.76
netmask: 255.255.255.0
dns_name: controller-03.domain.tld
static: '1'
mac_address: 64:15:B1:0C:BB:8B
interfaces_extra:
eth2:
onboot: 'no'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth0:
onboot: 'yes'
peerdns: 'no'
meta:
memory:
total: 778694656
interfaces:
- mac: 64:4C:50:19:3F:D9
max_speed: 100
name: eth2
ip: 10.22.0.76
netmask: 255.255.255.0
current_speed: 100
- mac: 64:32:74:99:95:25
max_speed: 100
name: eth1
ip: 10.21.0.76
netmask: 255.255.255.0
current_speed: 100
- name: eth0
ip: 10.20.0.76
netmask: 255.255.255.0
mac: 64:15:B1:0C:BB:8B
max_speed: 100
current_speed: 100
disks:
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0
name: sdc
size: 2411724800000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0
name: sdb
size: 536870912000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0
name: sda
size: 17179869184
system:
serial: '0'
version: '1.2'
fqdn: bootstrap
family: Virtual Machine
manufacturer: VirtualBox
cpu:
real: 0
total: 1
spec:
- model: Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz
frequency: 2397
error_type:
node_04: &node_04
role: compute
network_data:
- name: public
ip: 10.20.0.122
dev: eth0
netmask: 255.255.255.0
gateway: 10.20.0.1
- name:
- management
- storage
ip: 10.20.1.122
netmask: 255.255.255.0
dev: eth1
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: 3
default_gateway: 10.20.0.1
uid: 3
mac: 64:7D:B8:84:64:79
name: compute-01
ip: 10.20.0.122
profile: centos-x86_64
fqdn: compute-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 10.20.0.122
netboot_enabled: '1'
name_servers: ! '"10.20.0.2"'
puppet_master: fuel.domain.tld
ks_meta:
ks_spaces: '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3\",
\"volumes\": [{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200},
{\"type\": \"mbr\"}, {\"size\": 20000, \"type\": \"pv\", \"vg\": \"os\"}],
\"size\": 20480}, {\"type\": \"vg\", \"id\": \"os\", \"volumes\": [{\"mount\":
\"/\", \"type\": \"lv\", \"name\": \"root\", \"size\": 10240 }, {\"mount\":
\"swap\", \"type\": \"lv\", \"name\": \"swap\", \"size\": 2048}]}]"'
mco_enable: 1
mco_vhost: mcollective
mco_pskey: unset
mco_user: mcollective
puppet_enable: 0
install_log_2_syslog: 1
mco_password: marionette
puppet_auto_setup: 1
puppet_master: fuel.domain.tld
mco_auto_setup: 1
auth_key: ! '""'
puppet_version: 2.7.19
mco_connector: rabbitmq
mco_host: 10.20.0.2
interfaces:
eth0:
ip_address: 10.20.0.122
netmask: 255.255.255.0
dns_name: compute-01.domain.tld
static: '1'
mac_address: 64:7D:B8:84:64:79
interfaces_extra:
eth2:
onboot: 'no'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth0:
onboot: 'yes'
peerdns: 'no'
meta:
memory:
total: 778694656
interfaces:
- mac: 64:F1:30:1A:0A:95
max_speed: 100
name: eth2
ip: 10.22.0.122
netmask: 255.255.255.0
current_speed: 100
- mac: 64:95:7B:3A:4D:B6
max_speed: 100
name: eth1
ip: 10.21.0.122
netmask: 255.255.255.0
current_speed: 100
- name: eth0
ip: 10.20.0.122
netmask: 255.255.255.0
mac: 64:7D:B8:84:64:79
max_speed: 100
current_speed: 100
disks:
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0
name: sdc
size: 2411724800000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0
name: sdb
size: 536870912000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0
name: sda
size: 17179869184
system:
serial: '0'
version: '1.2'
fqdn: bootstrap
family: Virtual Machine
manufacturer: VirtualBox
cpu:
real: 0
total: 1
spec:
- model: Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz
frequency: 2397
error_type:
nodes:
- <<: *node_01
- <<: *node_02
- <<: *node_03
- <<: *node_04
attributes:
use_cow_images: true
libvirt_type: qemu
dns_nameservers:
- 10.20.0.1
verbose: true|false
debug: true|false
auto_assign_floating_ip: true
start_guests_on_host_boot: true
create_networks: true
compute_scheduler_driver: nova.scheduler.multi.MultiScheduler
quantum: true
master_hostname: controller-01
nagios: false
proj_name: test
nagios_master: fuelweb.domain.tld
management_vip: 10.20.1.200
public_vip: 10.20.0.200
#Nova-network part, used only if $quantum = `false`
novanetwork_parameters:
vlan_start: <1-1024>
network_manager: String
network_size: <Integer>
#Quantum part, used only if quantum='true'
quantum_parameters:
tenant_network_type: gre
segment_range: ! '300:500'
metadata_proxy_shared_secret: quantum
mysql:
root_password: root
glance:
db_password: glance
user_password: glance
swift:
user_password: swift_pass
nova:
db_password: nova
user_password: nova
access:
password: admin
user: admin
tenant: admin
email: admin@example.org
keystone:
db_password: keystone
admin_token: nova
quantum_access:
user_password: quantum
db_password: quantum
rabbit:
password: nova
user: nova
cinder:
password: cinder
user: cinder
floating_network_range: 10.20.0.150/26
fixed_network_range: 10.20.2.0/24
base_syslog:
syslog_port: '514'
syslog_server: 10.20.0.2
syslog:
syslog_port: '514'
syslog_transport: udp
syslog_server: ''
use_unicast_corosync: false
horizon_use_ssl: false
deployment_id: 1
deployment_mode: ha
deployment_source: cli
deployment_engine: nailyfact
engine:
url: http://localhost/cobbler_api
username: cobbler
password: cobbler

View File

@ -0,0 +1,56 @@
node_01:
mac: 64:43:7B:CA:56:DD
name: controller-01
ip: 10.20.0.94
profile: centos-x86_64
fqdn: controller-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 10.20.0.94
netboot_enabled: '1'
name_servers: ! '"10.20.0.2"'
#Write size in megabytes
ks_meta:
ks_spaces: '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3\",
\"volumes\": [{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200},
{\"type\": \"mbr\"}, {\"size\": 20000, \"type\": \"pv\", \"vg\": \"os\"}],
\"size\": 20480}, {\"type\": \"vg\", \"id\": \"os\", \"volumes\": [{\"mount\":
\"/\", \"type\": \"lv\", \"name\": \"root\", \"size\": 10240 }, {\"mount\":
\"swap\", \"type\": \"lv\", \"name\": \"swap\", \"size\": 2048}]}]"'
mco_enable: 1
mco_vhost: mcollective
mco_pskey: unset
mco_user: mcollective
puppet_enable: 0
install_log_2_syslog: 1
mco_password: marionette
puppet_auto_setup: 1
puppet_master: fuelweb.domain.tld
mco_auto_setup: 1
auth_key: ! '""'
puppet_version: 2.7.19
mco_connector: rabbitmq
mco_host: 10.20.0.2
interfaces:
eth0:
ip_address: 10.20.0.94
netmask: 255.255.255.0
dns_name: controller-01.domain.tld
static: '1'
mac_address: 64:43:7B:CA:56:DD
interfaces_extra:
eth2:
onboot: 'no'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth0:
onboot: 'yes'
peerdns: 'no'
engine:
url: http://localhost/cobbler_api
username: cobbler
password: cobbler

View File

@ -226,258 +226,7 @@ attributes:
storage_network_range: 172.16.6.0/24
floating_network_range:
- 240.0.12.10
- 240.0.12.100
- 240.0.12.101
- 240.0.12.102
- 240.0.12.103
- 240.0.12.104
- 240.0.12.105
- 240.0.12.106
- 240.0.12.107
- 240.0.12.108
- 240.0.12.109
- 240.0.12.11
- 240.0.12.110
- 240.0.12.111
- 240.0.12.112
- 240.0.12.113
- 240.0.12.114
- 240.0.12.115
- 240.0.12.116
- 240.0.12.117
- 240.0.12.118
- 240.0.12.119
- 240.0.12.12
- 240.0.12.120
- 240.0.12.121
- 240.0.12.122
- 240.0.12.123
- 240.0.12.124
- 240.0.12.125
- 240.0.12.126
- 240.0.12.127
- 240.0.12.128
- 240.0.12.129
- 240.0.12.13
- 240.0.12.130
- 240.0.12.131
- 240.0.12.132
- 240.0.12.133
- 240.0.12.134
- 240.0.12.135
- 240.0.12.136
- 240.0.12.137
- 240.0.12.138
- 240.0.12.139
- 240.0.12.14
- 240.0.12.140
- 240.0.12.141
- 240.0.12.142
- 240.0.12.143
- 240.0.12.144
- 240.0.12.145
- 240.0.12.146
- 240.0.12.147
- 240.0.12.148
- 240.0.12.149
- 240.0.12.15
- 240.0.12.150
- 240.0.12.151
- 240.0.12.152
- 240.0.12.153
- 240.0.12.154
- 240.0.12.155
- 240.0.12.156
- 240.0.12.157
- 240.0.12.158
- 240.0.12.159
- 240.0.12.16
- 240.0.12.160
- 240.0.12.161
- 240.0.12.162
- 240.0.12.163
- 240.0.12.164
- 240.0.12.165
- 240.0.12.166
- 240.0.12.167
- 240.0.12.168
- 240.0.12.169
- 240.0.12.17
- 240.0.12.170
- 240.0.12.171
- 240.0.12.172
- 240.0.12.173
- 240.0.12.174
- 240.0.12.175
- 240.0.12.176
- 240.0.12.177
- 240.0.12.178
- 240.0.12.179
- 240.0.12.18
- 240.0.12.180
- 240.0.12.181
- 240.0.12.182
- 240.0.12.183
- 240.0.12.184
- 240.0.12.185
- 240.0.12.186
- 240.0.12.187
- 240.0.12.188
- 240.0.12.189
- 240.0.12.19
- 240.0.12.190
- 240.0.12.191
- 240.0.12.192
- 240.0.12.193
- 240.0.12.194
- 240.0.12.195
- 240.0.12.196
- 240.0.12.197
- 240.0.12.198
- 240.0.12.199
- 240.0.12.2
- 240.0.12.20
- 240.0.12.200
- 240.0.12.201
- 240.0.12.202
- 240.0.12.203
- 240.0.12.204
- 240.0.12.205
- 240.0.12.206
- 240.0.12.207
- 240.0.12.208
- 240.0.12.209
- 240.0.12.21
- 240.0.12.210
- 240.0.12.211
- 240.0.12.212
- 240.0.12.213
- 240.0.12.214
- 240.0.12.215
- 240.0.12.216
- 240.0.12.217
- 240.0.12.218
- 240.0.12.219
- 240.0.12.22
- 240.0.12.220
- 240.0.12.221
- 240.0.12.222
- 240.0.12.223
- 240.0.12.224
- 240.0.12.225
- 240.0.12.226
- 240.0.12.227
- 240.0.12.228
- 240.0.12.229
- 240.0.12.23
- 240.0.12.230
- 240.0.12.231
- 240.0.12.232
- 240.0.12.233
- 240.0.12.234
- 240.0.12.235
- 240.0.12.236
- 240.0.12.237
- 240.0.12.238
- 240.0.12.239
- 240.0.12.24
- 240.0.12.240
- 240.0.12.241
- 240.0.12.242
- 240.0.12.243
- 240.0.12.244
- 240.0.12.245
- 240.0.12.246
- 240.0.12.247
- 240.0.12.248
- 240.0.12.249
- 240.0.12.25
- 240.0.12.250
- 240.0.12.251
- 240.0.12.252
- 240.0.12.253
- 240.0.12.254
- 240.0.12.26
- 240.0.12.27
- 240.0.12.28
- 240.0.12.29
- 240.0.12.3
- 240.0.12.30
- 240.0.12.31
- 240.0.12.32
- 240.0.12.33
- 240.0.12.34
- 240.0.12.35
- 240.0.12.36
- 240.0.12.37
- 240.0.12.38
- 240.0.12.39
- 240.0.12.4
- 240.0.12.40
- 240.0.12.41
- 240.0.12.42
- 240.0.12.43
- 240.0.12.44
- 240.0.12.45
- 240.0.12.46
- 240.0.12.47
- 240.0.12.48
- 240.0.12.49
- 240.0.12.5
- 240.0.12.50
- 240.0.12.51
- 240.0.12.52
- 240.0.12.53
- 240.0.12.54
- 240.0.12.55
- 240.0.12.56
- 240.0.12.57
- 240.0.12.58
- 240.0.12.59
- 240.0.12.6
- 240.0.12.60
- 240.0.12.61
- 240.0.12.62
- 240.0.12.63
- 240.0.12.64
- 240.0.12.65
- 240.0.12.66
- 240.0.12.67
- 240.0.12.68
- 240.0.12.69
- 240.0.12.7
- 240.0.12.70
- 240.0.12.71
- 240.0.12.72
- 240.0.12.73
- 240.0.12.74
- 240.0.12.75
- 240.0.12.76
- 240.0.12.77
- 240.0.12.78
- 240.0.12.79
- 240.0.12.8
- 240.0.12.80
- 240.0.12.81
- 240.0.12.82
- 240.0.12.83
- 240.0.12.84
- 240.0.12.85
- 240.0.12.86
- 240.0.12.87
- 240.0.12.88
- 240.0.12.89
- 240.0.12.9
- 240.0.12.90
- 240.0.12.91
- 240.0.12.92
- 240.0.12.93
- 240.0.12.94
- 240.0.12.95
- 240.0.12.96
- 240.0.12.97
- 240.0.12.98
- 240.0.12.99
auth_key: ''
syslog:
syslog_port: '514'
@ -489,4 +238,4 @@ attributes:
db_password: rveahKih
user_password: ENwyu6oa
deployment_id: 8

776
examples/full.json Normal file
View File

@ -0,0 +1,776 @@
[
{
"status": "discover",
"name": "Untitled (1D:CA)",
"ip": "172.18.94.43",
"error_type": null,
"pending_addition": false,
"fqdn": null,
"network_data": [],
"platform_name": null,
"cluster": null,
"mac": "64:57:26:83:1D:CA",
"meta": {
"memory": {
"slots": 1,
"total": 1073741824,
"maximum_capacity": 1073741824,
"devices": [
{
"type": "RAM",
"size": 1073741824
}
]
},
"interfaces": [
{
"mac": "64:EA:09:C2:C6:75",
"max_speed": null,
"name": "eth2",
"current_speed": null
},
{
"mac": "64:4D:C1:48:2F:8A",
"max_speed": null,
"name": "eth1",
"current_speed": null
},
{
"name": "eth0",
"ip": "172.18.94.43",
"netmask": "255.255.255.224",
"mac": "64:57:26:83:1D:CA",
"max_speed": null,
"current_speed": null
}
],
"disks": [
{
"model": null,
"disk": "disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4",
"name": "vdb",
"size": 21474836480
},
{
"model": null,
"disk": "disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3",
"name": "vda",
"size": 21474836480
}
],
"system": {
"fqdn": "bootstrap",
"manufacturer": "KVM"
},
"cpu": {
"real": 0,
"total": 2,
"spec": [
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
},
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
}
]
}
},
"role": null,
"online": true,
"progress": 0,
"pending_deletion": false,
"os_platform": "centos",
"id": 7,
"manufacturer": "KVM"
},
{
"status": "discover",
"name": "Untitled (07:26)",
"ip": "172.18.94.40",
"error_type": null,
"pending_addition": false,
"fqdn": null,
"network_data": [],
"platform_name": null,
"cluster": null,
"mac": "64:BC:C3:9C:07:26",
"meta": {
"memory": {
"slots": 1,
"total": 1073741824,
"maximum_capacity": 1073741824,
"devices": [
{
"type": "RAM",
"size": 1073741824
}
]
},
"interfaces": [
{
"mac": "64:E7:D3:F4:FD:BD",
"max_speed": null,
"name": "eth2",
"current_speed": null
},
{
"mac": "64:81:FF:17:59:9F",
"max_speed": null,
"name": "eth1",
"current_speed": null
},
{
"name": "eth0",
"ip": "172.18.94.40",
"netmask": "255.255.255.224",
"mac": "64:BC:C3:9C:07:26",
"max_speed": null,
"current_speed": null
}
],
"disks": [
{
"model": null,
"disk": "disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4",
"name": "vdb",
"size": 21474836480
},
{
"model": null,
"disk": "disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3",
"name": "vda",
"size": 21474836480
}
],
"system": {
"fqdn": "bootstrap",
"manufacturer": "KVM"
},
"cpu": {
"real": 0,
"total": 2,
"spec": [
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
},
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
}
]
}
},
"role": null,
"online": true,
"progress": 0,
"pending_deletion": false,
"os_platform": "centos",
"id": 6,
"manufacturer": "KVM"
},
{
"status": "discover",
"name": "Untitled (B2:DC)",
"ip": "172.18.94.45",
"error_type": null,
"pending_addition": false,
"fqdn": null,
"network_data": [],
"platform_name": null,
"cluster": null,
"mac": "64:97:93:5F:B2:DC",
"meta": {
"memory": {
"slots": 1,
"total": 1073741824,
"maximum_capacity": 1073741824,
"devices": [
{
"type": "RAM",
"size": 1073741824
}
]
},
"interfaces": [
{
"mac": "64:E1:22:FF:4D:64",
"max_speed": null,
"name": "eth2",
"current_speed": null
},
{
"mac": "64:FC:F2:3C:CA:5D",
"max_speed": null,
"name": "eth1",
"current_speed": null
},
{
"name": "eth0",
"ip": "172.18.94.45",
"netmask": "255.255.255.224",
"mac": "64:97:93:5F:B2:DC",
"max_speed": null,
"current_speed": null
}
],
"disks": [
{
"model": null,
"disk": "disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4",
"name": "vdb",
"size": 21474836480
},
{
"model": null,
"disk": "disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3",
"name": "vda",
"size": 21474836480
}
],
"system": {
"fqdn": "bootstrap",
"manufacturer": "KVM"
},
"cpu": {
"real": 0,
"total": 2,
"spec": [
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
},
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
}
]
}
},
"role": null,
"online": true,
"progress": 0,
"pending_deletion": false,
"os_platform": "centos",
"id": 5,
"manufacturer": "KVM"
},
{
"status": "discover",
"name": "Untitled (50:D3)",
"ip": "172.18.94.36",
"error_type": null,
"pending_addition": false,
"fqdn": null,
"network_data": [],
"platform_name": null,
"cluster": null,
"mac": "64:F4:64:E7:50:D3",
"meta": {
"memory": {
"slots": 1,
"total": 1073741824,
"maximum_capacity": 1073741824,
"devices": [
{
"type": "RAM",
"size": 1073741824
}
]
},
"interfaces": [
{
"mac": "64:7E:46:33:10:33",
"max_speed": null,
"name": "eth2",
"current_speed": null
},
{
"mac": "64:E1:41:CF:03:D7",
"max_speed": null,
"name": "eth1",
"current_speed": null
},
{
"name": "eth0",
"ip": "172.18.94.36",
"netmask": "255.255.255.224",
"mac": "64:F4:64:E7:50:D3",
"max_speed": null,
"current_speed": null
}
],
"disks": [
{
"model": null,
"disk": "disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4",
"name": "vdb",
"size": 21474836480
},
{
"model": null,
"disk": "disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3",
"name": "vda",
"size": 21474836480
}
],
"system": {
"fqdn": "bootstrap",
"manufacturer": "KVM"
},
"cpu": {
"real": 0,
"total": 2,
"spec": [
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
},
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
}
]
}
},
"role": null,
"online": true,
"progress": 0,
"pending_deletion": false,
"os_platform": "centos",
"id": 4,
"manufacturer": "KVM"
},
{
"status": "discover",
"name": "Untitled (83:E8)",
"ip": "172.18.94.41",
"error_type": null,
"pending_addition": false,
"fqdn": null,
"network_data": [],
"platform_name": null,
"cluster": null,
"mac": "64:48:7A:14:83:E8",
"meta": {
"memory": {
"slots": 1,
"total": 1073741824,
"maximum_capacity": 1073741824,
"devices": [
{
"type": "RAM",
"size": 1073741824
}
]
},
"interfaces": [
{
"mac": "64:DC:6B:09:02:4C",
"max_speed": null,
"name": "eth2",
"current_speed": null
},
{
"mac": "64:D0:7C:2E:02:59",
"max_speed": null,
"name": "eth1",
"current_speed": null
},
{
"name": "eth0",
"ip": "172.18.94.41",
"netmask": "255.255.255.224",
"mac": "64:48:7A:14:83:E8",
"max_speed": null,
"current_speed": null
}
],
"disks": [
{
"model": null,
"disk": "disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4",
"name": "vdb",
"size": 21474836480
},
{
"model": null,
"disk": "disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3",
"name": "vda",
"size": 21474836480
}
],
"system": {
"fqdn": "bootstrap",
"manufacturer": "KVM"
},
"cpu": {
"real": 0,
"total": 2,
"spec": [
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
},
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
}
]
}
},
"role": null,
"online": true,
"progress": 0,
"pending_deletion": false,
"os_platform": "centos",
"id": 2,
"manufacturer": "KVM"
},
{
"status": "discover",
"name": "Untitled (EB:4E)",
"ip": "172.18.94.47",
"error_type": null,
"pending_addition": false,
"fqdn": null,
"network_data": [],
"platform_name": null,
"cluster": null,
"mac": "64:DC:FD:AD:EB:4E",
"meta": {
"memory": {
"slots": 1,
"total": 1073741824,
"maximum_capacity": 1073741824,
"devices": [
{
"type": "RAM",
"size": 1073741824
}
]
},
"interfaces": [
{
"mac": "64:74:10:45:3D:53",
"max_speed": null,
"name": "eth2",
"current_speed": null
},
{
"mac": "64:7E:1B:D7:5D:EF",
"max_speed": null,
"name": "eth1",
"current_speed": null
},
{
"name": "eth0",
"ip": "172.18.94.47",
"netmask": "255.255.255.224",
"mac": "64:DC:FD:AD:EB:4E",
"max_speed": null,
"current_speed": null
}
],
"disks": [
{
"model": null,
"disk": "disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4",
"name": "vdb",
"size": 21474836480
},
{
"model": null,
"disk": "disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3",
"name": "vda",
"size": 21474836480
}
],
"system": {
"fqdn": "bootstrap",
"manufacturer": "KVM"
},
"cpu": {
"real": 0,
"total": 2,
"spec": [
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
},
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
}
]
}
},
"role": null,
"online": true,
"progress": 0,
"pending_deletion": false,
"os_platform": "centos",
"id": 8,
"manufacturer": "KVM"
},
{
"status": "discover",
"name": "Untitled (79:39)",
"ip": "172.18.94.44",
"error_type": null,
"pending_addition": false,
"fqdn": null,
"network_data": [],
"platform_name": null,
"cluster": null,
"mac": "64:EA:DF:59:79:39",
"meta": {
"memory": {
"slots": 1,
"total": 1073741824,
"maximum_capacity": 1073741824,
"devices": [
{
"type": "RAM",
"size": 1073741824
}
]
},
"interfaces": [
{
"mac": "64:E9:9A:4D:04:3F",
"max_speed": null,
"name": "eth2",
"current_speed": null
},
{
"mac": "64:B9:CE:28:0D:0E",
"max_speed": null,
"name": "eth1",
"current_speed": null
},
{
"name": "eth0",
"ip": "172.18.94.44",
"netmask": "255.255.255.224",
"mac": "64:EA:DF:59:79:39",
"max_speed": null,
"current_speed": null
}
],
"disks": [
{
"model": null,
"disk": "disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4",
"name": "vdb",
"size": 21474836480
},
{
"model": null,
"disk": "disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3",
"name": "vda",
"size": 21474836480
}
],
"system": {
"fqdn": "bootstrap",
"manufacturer": "KVM"
},
"cpu": {
"real": 0,
"total": 2,
"spec": [
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
},
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
}
]
}
},
"role": null,
"online": true,
"progress": 0,
"pending_deletion": false,
"os_platform": "centos",
"id": 9,
"manufacturer": "KVM"
},
{
"status": "discover",
"name": "Untitled (D2:66)",
"ip": "172.18.94.39",
"error_type": null,
"pending_addition": false,
"fqdn": null,
"network_data": [],
"platform_name": null,
"cluster": null,
"mac": "64:C3:54:54:D2:66",
"meta": {
"memory": {
"slots": 1,
"total": 2147483648,
"maximum_capacity": 2147483648,
"devices": [
{
"type": "RAM",
"size": 2147483648
}
]
},
"interfaces": [
{
"mac": "64:86:89:DC:11:DF",
"max_speed": null,
"name": "eth2",
"current_speed": null
},
{
"mac": "64:3E:31:66:CA:AF",
"max_speed": null,
"name": "eth1",
"current_speed": null
},
{
"name": "eth0",
"ip": "172.18.94.39",
"netmask": "255.255.255.224",
"mac": "64:C3:54:54:D2:66",
"max_speed": null,
"current_speed": null
}
],
"disks": [
{
"model": null,
"disk": "disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4",
"name": "vdb",
"size": 21474836480
},
{
"model": null,
"disk": "disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3",
"name": "vda",
"size": 21474836480
}
],
"system": {
"fqdn": "bootstrap",
"manufacturer": "KVM"
},
"cpu": {
"real": 0,
"total": 2,
"spec": [
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
},
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
}
]
}
},
"role": null,
"online": true,
"progress": 0,
"pending_deletion": false,
"os_platform": "centos",
"id": 1,
"manufacturer": "KVM"
},
{
"status": "discover",
"name": "Untitled (1D:C9)",
"ip": "172.18.94.42",
"error_type": null,
"pending_addition": false,
"fqdn": null,
"network_data": [],
"platform_name": null,
"cluster": null,
"mac": "64:B7:37:B1:1D:C9",
"meta": {
"memory": {
"slots": 1,
"total": 1073741824,
"maximum_capacity": 1073741824,
"devices": [
{
"type": "RAM",
"size": 1073741824
}
]
},
"interfaces": [
{
"mac": "64:49:B1:F5:AA:CF",
"max_speed": null,
"name": "eth2",
"current_speed": null
},
{
"mac": "64:B3:BC:6D:17:A0",
"max_speed": null,
"name": "eth1",
"current_speed": null
},
{
"name": "eth0",
"ip": "172.18.94.42",
"netmask": "255.255.255.224",
"mac": "64:B7:37:B1:1D:C9",
"max_speed": null,
"current_speed": null
}
],
"disks": [
{
"model": null,
"disk": "disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4",
"name": "vdb",
"size": 21474836480
},
{
"model": null,
"disk": "disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3",
"name": "vda",
"size": 21474836480
}
],
"system": {
"fqdn": "bootstrap",
"manufacturer": "KVM"
},
"cpu": {
"real": 0,
"total": 2,
"spec": [
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
},
{
"model": "QEMU Virtual CPU version 1.2.0",
"frequency": 1999
}
]
}
},
"role": null,
"online": true,
"progress": 0,
"pending_deletion": false,
"os_platform": "centos",
"id": 3,
"manufacturer": "KVM"
}
]

614
examples/full.yaml Normal file
View File

@ -0,0 +1,614 @@
---
nodes:
- role: compute
network_data:
- name: public
ip: 172.18.94.39
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.39
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '1'
default_gateway: 172.18.94.33
uid: '1'
mac: 64:C3:54:54:D2:66
name: compute-01
ip: 172.18.94.39
profile: centos-x86_64
fqdn: compute-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.39
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: &18648020
ks_spaces: ! '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3\",\"volumes\":
[{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200}, {\"type\":
\"mbr\"}, {\"size\": 20275, \"type\": \"pv\", \"vg\": \"os\"}],\"size\": 19232},{\"type\":
\"vg\", \"id\": \"os\", \"volumes\": [{\"mount\": \"/\", \"type\": \"lv\", \"name\":
\"root\", \"size\": 19232}, {\"mount\": \"swap\", \"type\": \"lv\", \"name\":
\"swap\", \"size\": 1024}]}, {\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4\",
\"volumes\": [{\"type\": \"mbr\"}, {\"size\": 20476, \"type\": \"pv\", \"vg\":
\"cinder-volumes\"}], \"size\": 20476}]"'
mco_enable: 1
mco_vhost: mcollective
mco_pskey: unset
mco_user: mcollective
puppet_enable: 0
install_log_2_syslog: 1
mco_password: marionette
puppet_auto_setup: 1
puppet_master: fuel.domain.tld
mco_auto_setup: 1
auth_key: ! '""'
puppet_version: 2.7.19
mco_connector: rabbitmq
mco_host: 172.18.94.34
interfaces:
eth0:
ip_address: 172.18.94.39
netmask: 255.255.255.0
dns_name: compute-01.domain.tld
static: '1'
mac_address: 64:C3:54:54:D2:66
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: &18667760
memory:
total: 778694656
interfaces:
- mac: 64:D8:E1:F6:66:43
max_speed: 100
name: eth2
ip: 10.22.0.94
netmask: 255.255.255.0
current_speed: 100
- mac: 64:C8:E2:3B:FD:6E
max_speed: 100
name: eth1
ip: 10.21.0.94
netmask: 255.255.255.0
current_speed: 100
- name: eth0
ip: 10.20.0.94
netmask: 255.255.255.0
mac: 64:43:7B:CA:56:DD
max_speed: 100
current_speed: 100
disks:
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0
name: sdc
size: 2411724800000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0
name: sdb
size: 536870912000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0
name: sda
size: 17179869184
system:
serial: '0'
version: '1.2'
fqdn: bootstrap
family: Virtual Machine
manufacturer: VirtualBox
cpu:
real: 0
total: 1
spec:
- model: Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz
frequency: 2397
error_type:
- role: primary-controller
network_data:
- name: public
ip: 172.18.94.41
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.41
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '2'
default_gateway: 172.18.94.33
uid: '2'
mac: 64:48:7A:14:83:E8
name: controller-01
ip: 172.18.94.41
profile: centos-x86_64
fqdn: controller-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.41
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.41
netmask: 255.255.255.0
dns_name: controller-01.domain.tld
static: '1'
mac_address: 64:48:7A:14:83:E8
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: controller
network_data:
- name: public
ip: 172.18.94.42
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.42
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '3'
default_gateway: 172.18.94.33
uid: '3'
mac: 64:B7:37:B1:1D:C9
name: controller-02
ip: 172.18.94.42
profile: centos-x86_64
fqdn: controller-02.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.42
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.42
netmask: 255.255.255.0
dns_name: controller-02.domain.tld
static: '1'
mac_address: 64:B7:37:B1:1D:C9
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: controller
network_data:
- name: public
ip: 172.18.94.36
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.36
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '4'
default_gateway: 172.18.94.33
uid: '4'
mac: 64:F4:64:E7:50:D3
name: controller-03
ip: 172.18.94.36
profile: centos-x86_64
fqdn: controller-03.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.36
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.36
netmask: 255.255.255.0
dns_name: controller-03.domain.tld
static: '1'
mac_address: 64:F4:64:E7:50:D3
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: storage
network_data:
- name: public
ip: 172.18.94.43
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.43
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '7'
default_gateway: 172.18.94.33
uid: '7'
mac: 64:57:26:83:1D:CA
name: swift-01
ip: 172.18.94.43
profile: centos-x86_64
fqdn: swift-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.43
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.43
netmask: 255.255.255.0
dns_name: swift-01.domain.tld
static: '1'
mac_address: 64:57:26:83:1D:CA
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: storage
network_data:
- name: public
ip: 172.18.94.47
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.47
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '8'
default_gateway: 172.18.94.33
uid: '8'
mac: 64:DC:FD:AD:EB:4E
name: swift-02
ip: 172.18.94.47
profile: centos-x86_64
fqdn: swift-02.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.47
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.47
netmask: 255.255.255.0
dns_name: swift-02.domain.tld
static: '1'
mac_address: 64:DC:FD:AD:EB:4E
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: storage
network_data:
- name: public
ip: 172.18.94.44
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.44
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '9'
default_gateway: 172.18.94.33
uid: '9'
mac: 64:EA:DF:59:79:39
name: swift-03
ip: 172.18.94.44
profile: centos-x86_64
fqdn: swift-03.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.44
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.44
netmask: 255.255.255.0
dns_name: swift-03.domain.tld
static: '1'
mac_address: 64:EA:DF:59:79:39
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: primary-swift-proxy
network_data:
- name: public
ip: 172.18.94.40
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.40
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '6'
default_gateway: 172.18.94.33
uid: '6'
mac: 64:BC:C3:9C:07:26
name: swiftproxy-01
ip: 172.18.94.40
profile: centos-x86_64
fqdn: swiftproxy-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.40
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.40
netmask: 255.255.255.0
dns_name: swiftproxy-01.domain.tld
static: '1'
mac_address: 64:BC:C3:9C:07:26
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
- role: swift-proxy
network_data:
- name: public
ip: 172.18.94.45
dev: eth0
netmask: 255.255.255.0
gateway: 172.18.94.33
- name:
- management
- storage
ip: 10.107.2.45
dev: eth1
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '5'
default_gateway: 172.18.94.33
uid: '5'
mac: 64:97:93:5F:B2:DC
name: swiftproxy-02
ip: 172.18.94.45
profile: centos-x86_64
fqdn: swiftproxy-02.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 172.18.94.45
netboot_enabled: '1'
name_servers: 172.18.94.34
puppet_master: fuel.domain.tld
ks_meta: *18648020
interfaces:
eth0:
ip_address: 172.18.94.45
netmask: 255.255.255.0
dns_name: swiftproxy-02.domain.tld
static: '1'
mac_address: 64:97:93:5F:B2:DC
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *18667760
error_type:
attributes:
master_ip: 172.18.94.34
use_cow_images: true
libvirt_type: kvm
dns_nameservers: 172.18.94.34
verbose: true
debug: true
auto_assign_floating_ip: true
start_guests_on_host_boot: true
create_networks: true
compute_scheduler_driver: nova.scheduler.multi.MultiScheduler
quantum: true
master_hostname: controller-01
nagios: false
proj_name: test
nagios_master: fuelweb.domain.tld
management_vip: 10.107.2.254
public_vip: 172.18.94.46
novanetwork_parameters:
vlan_start: <1-1024>
network_manager: String
network_size: <Integer>
quantum_parameters:
tenant_network_type: gre
segment_range: 300:500
metadata_proxy_shared_secret: quantum
mysql:
root_password: root
glance:
db_password: glance
user_password: glance
swift:
user_password: swift_pass
nova:
db_password: nova
user_password: nova
access:
password: admin
user: admin
tenant: admin
email: admin@example.org
keystone:
db_password: keystone
admin_token: nova
quantum_access:
user_password: quantum
db_password: quantum
rabbit:
password: nova
user: nova
cinder:
password: cinder
user: cinder
floating_network_range: 172.18.94.48/28
fixed_network_range: 10.107.2.0/24
base_syslog:
syslog_port: '514'
syslog_server: 172.18.94.34
syslog:
syslog_port: '514'
syslog_transport: udp
syslog_server: ''
use_unicast_corosync: false
horizon_use_ssl: false
cinder_nodes:
- controller
ntp_servers:
- pool.ntp.org
deployment_id: 1
deployment_mode: ha_full
deployment_source: cli
deployment_engine: nailyfact
engine:
url: http://localhost/cobbler_api
username: cobbler
password: cobbler

166
examples/simple.json Normal file
View File

@ -0,0 +1,166 @@
[
{
"status": "discover",
"name": "Untitled (64:79)",
"ip": "10.20.0.122",
"error_type": null,
"pending_addition": false,
"fqdn": null,
"network_data": [],
"platform_name": null,
"cluster": null,
"mac": "64:7D:B8:84:64:79",
"meta": {
"memory": {
"slots": 1,
"total": 2147483648,
"maximum_capacity": 2147483648,
"devices": [
{
"type": "RAM",
"size": 2147483648
}
]
},
"interfaces": [
{
"mac": "64:F1:30:1A:0A:95",
"max_speed": null,
"name": "eth2",
"current_speed": null
},
{
"mac": "64:95:7B:3A:4D:B6",
"max_speed": null,
"name": "eth1",
"current_speed": null
},
{
"name": "eth0",
"ip": "10.20.0.122",
"netmask": "255.255.255.0",
"mac": "64:7D:B8:84:64:79",
"max_speed": null,
"current_speed": null
}
],
"disks": [
{
"model": null,
"disk": "disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4",
"name": "vdb",
"size": 21474836480
},
{
"model": null,
"disk": "disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3",
"name": "vda",
"size": 21474836480
}
],
"system": {
"fqdn": "bootstrap",
"manufacturer": "KVM"
},
"cpu": {
"real": 0,
"total": 1,
"spec": [
{
"model": "QEMU Virtual CPU version 1.0",
"frequency": 3300
}
]
}
},
"role": null,
"online": true,
"progress": 0,
"pending_deletion": false,
"os_platform": "centos",
"id": 1,
"manufacturer": "KVM"
},
{
"status": "discover",
"name": "Untitled (56:DD)",
"ip": "10.20.0.94",
"error_type": null,
"pending_addition": false,
"fqdn": null,
"network_data": [],
"platform_name": null,
"cluster": null,
"mac": "64:43:7B:CA:56:DD",
"meta": {
"memory": {
"slots": 1,
"total": 1073741824,
"maximum_capacity": 1073741824,
"devices": [
{
"type": "RAM",
"size": 1073741824
}
]
},
"interfaces": [
{
"mac": "64:D8:E1:F6:66:43",
"max_speed": null,
"name": "eth2",
"current_speed": null
},
{
"mac": "64:C8:E2:3B:FD:6E",
"max_speed": null,
"name": "eth1",
"current_speed": null
},
{
"name": "eth0",
"ip": "10.20.0.94",
"netmask": "255.255.255.0",
"mac": "64:43:7B:CA:56:DD",
"max_speed": null,
"current_speed": null
}
],
"disks": [
{
"model": null,
"disk": "disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4",
"name": "vdb",
"size": 21474836480
},
{
"model": null,
"disk": "disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3",
"name": "vda",
"size": 21474836480
}
],
"system": {
"fqdn": "bootstrap",
"manufacturer": "KVM"
},
"cpu": {
"real": 0,
"total": 1,
"spec": [
{
"model": "QEMU Virtual CPU version 1.0",
"frequency": 3300
}
]
}
},
"role": null,
"online": true,
"progress": 0,
"pending_deletion": false,
"os_platform": "centos",
"id": 2,
"manufacturer": "KVM"
}
]

249
examples/simple.yaml Normal file
View File

@ -0,0 +1,249 @@
---
nodes:
- role: compute
network_data:
- name: public
ip: 10.20.0.122
dev: eth0
netmask: 255.255.255.0
gateway: 10.20.0.1
- name:
- management
- storage
ip: 10.20.1.122
dev: eth0
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '1'
default_gateway: 10.20.0.1
uid: '1'
mac: 64:7D:B8:84:64:79
name: compute-01
ip: 10.20.0.122
profile: centos-x86_64
fqdn: compute-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 10.20.0.122
netboot_enabled: '1'
name_servers: 10.20.0.2
puppet_master: fuel.domain.tld
ks_meta: &17570000
ks_spaces: ! '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3\",\"volumes\":
[{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200}, {\"type\":
\"mbr\"}, {\"size\": 20275, \"type\": \"pv\", \"vg\": \"os\"}],\"size\": 19232},{\"type\":
\"vg\", \"id\": \"os\", \"volumes\": [{\"mount\": \"/\", \"type\": \"lv\", \"name\":
\"root\", \"size\": 19232}, {\"mount\": \"swap\", \"type\": \"lv\", \"name\":
\"swap\", \"size\": 1024}]}, {\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:07.0-virtio-pci-virtio4\",
\"volumes\": [{\"type\": \"mbr\"}, {\"size\": 20476, \"type\": \"pv\", \"vg\":
\"cinder-volumes\"}], \"size\": 20476}]"'
mco_enable: 1
mco_vhost: mcollective
mco_pskey: unset
mco_user: mcollective
puppet_enable: 0
install_log_2_syslog: 1
mco_password: marionette
puppet_auto_setup: 1
puppet_master: fuel.domain.tld
mco_auto_setup: 1
auth_key: ! '""'
puppet_version: 2.7.19
mco_connector: rabbitmq
mco_host: 10.20.0.2
interfaces:
eth0:
ip_address: 10.20.0.122
netmask: 255.255.255.0
dns_name: compute-01.domain.tld
static: '1'
mac_address: 64:7D:B8:84:64:79
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: &17588060
memory:
total: 778694656
interfaces:
- mac: 64:D8:E1:F6:66:43
max_speed: 100
name: eth2
ip: 10.22.0.94
netmask: 255.255.255.0
current_speed: 100
- mac: 64:C8:E2:3B:FD:6E
max_speed: 100
name: eth1
ip: 10.21.0.94
netmask: 255.255.255.0
current_speed: 100
- name: eth0
ip: 10.20.0.94
netmask: 255.255.255.0
mac: 64:43:7B:CA:56:DD
max_speed: 100
current_speed: 100
disks:
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0
name: sdc
size: 2411724800000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0
name: sdb
size: 536870912000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0
name: sda
size: 17179869184
system:
serial: '0'
version: '1.2'
fqdn: bootstrap
family: Virtual Machine
manufacturer: VirtualBox
cpu:
real: 0
total: 1
spec:
- model: Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz
frequency: 2397
error_type:
- role: primary-controller
network_data:
- name: public
ip: 10.20.0.94
dev: eth0
netmask: 255.255.255.0
gateway: 10.20.0.1
- name:
- management
- storage
ip: 10.20.1.94
dev: eth0
netmask: 255.255.255.0
- name: fixed
dev: eth2
public_br: br-ex
internal_br: br-mgmt
id: '2'
default_gateway: 10.20.0.1
uid: '2'
mac: 64:43:7B:CA:56:DD
name: controller-01
ip: 10.20.0.94
profile: centos-x86_64
fqdn: controller-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 10.20.0.94
netboot_enabled: '1'
name_servers: 10.20.0.2
puppet_master: fuel.domain.tld
ks_meta: *17570000
interfaces:
eth0:
ip_address: 10.20.0.94
netmask: 255.255.255.0
dns_name: controller-01.domain.tld
static: '1'
mac_address: 64:43:7B:CA:56:DD
interfaces_extra:
eth0:
onboot: 'yes'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth2:
onboot: 'no'
peerdns: 'no'
meta: *17588060
error_type:
attributes:
use_cow_images: true
libvirt_type: kvm
dns_nameservers: 10.20.0.2
verbose: true
debug: true
auto_assign_floating_ip: true
start_guests_on_host_boot: true
create_networks: true
compute_scheduler_driver: nova.scheduler.multi.MultiScheduler
quantum: true
master_hostname: controller-01
nagios: false
proj_name: test
nagios_master: fuelweb.domain.tld
management_vip: 10.20.1.200
public_vip: 10.20.0.200
novanetwork_parameters:
vlan_start: <1-1024>
network_manager: String
network_size: <Integer>
quantum_parameters:
tenant_network_type: gre
segment_range: 300:500
metadata_proxy_shared_secret: quantum
mysql:
root_password: root
glance:
db_password: glance
user_password: glance
swift:
user_password: swift_pass
nova:
db_password: nova
user_password: nova
access:
password: admin
user: admin
tenant: admin
email: admin@example.org
keystone:
db_password: keystone
admin_token: nova
quantum_access:
user_password: quantum
db_password: quantum
rabbit:
password: nova
user: nova
cinder:
password: cinder
user: cinder
floating_network_range: 10.20.0.150/28
fixed_network_range: 10.20.1.0/24
base_syslog:
syslog_port: '514'
syslog_server: 10.20.0.2
syslog:
syslog_port: '514'
syslog_transport: udp
syslog_server: ''
use_unicast_corosync: false
horizon_use_ssl: false
cinder_nodes:
- controller
ntp_servers:
- pool.ntp.org
deployment_id: 1
deployment_mode: ha
deployment_source: cli
deployment_engine: nailyfact
engine:
url: http://localhost/cobbler_api
username: cobbler
password: cobbler

View File

@ -16,7 +16,10 @@ require 'astute/ruby_removed_functions'
require 'json'
require 'logger'
require 'shellwords'
require 'astute/ext/exception'
require 'astute/ext/deep_copy'
require 'astute/config'
require 'astute/logparser'
require 'astute/orchestrator'
@ -36,18 +39,27 @@ module Astute
autoload 'NodesRemover', 'astute/nodes_remover'
autoload 'Node', 'astute/node'
autoload 'NodesHash', 'astute/node'
autoload 'RedhatChecker', 'astute/redhat_checker'
LogParser.autoload :ParseDeployLogs, 'astute/logparser/deployment'
LogParser.autoload :ParseProvisionLogs, 'astute/logparser/provision'
LogParser.autoload :Patterns, 'astute/logparser/parser_patterns'
SUCCESS = 0
FAIL = 1
LOG_PATH = '/var/log/astute.log'
def self.logger
unless @logger
@logger = Logger.new('/var/log/astute.log')
@logger = Logger.new(LOG_PATH)
@logger.formatter = proc do |severity, datetime, progname, msg|
severity_map = {'DEBUG' => 'debug', 'INFO' => 'info', 'WARN' => 'warning', 'ERROR' => 'err', 'FATAL' => 'crit'}
severity_map = {
'DEBUG' => 'debug',
'INFO' => 'info',
'WARN' => 'warning',
'ERROR' => 'err',
'FATAL' => 'crit'
}
"#{datetime.strftime("%Y-%m-%dT%H:%M:%S")} #{severity_map[severity]}: [#{Process.pid}] #{msg}\n"
end
end

View File

@ -53,14 +53,18 @@ module Astute
def self.default_config
conf = {}
conf[:PUPPET_TIMEOUT] = 60*60 # maximum time it waits for the whole deployment
conf[:PUPPET_TIMEOUT] = 60 * 60 # maximum time it waits for the whole deployment
conf[:PUPPET_DEPLOY_INTERVAL] = 2 # sleep for ## sec, then check puppet status again
conf[:PUPPET_FADE_TIMEOUT] = 60 # how long it can take for puppet to exit after dumping to last_run_summary
conf[:MC_RETRIES] = 5 # MClient tries to call mcagent before failure
conf[:MC_RETRY_INTERVAL] = 1 # MClient sleeps for ## sec between retries
conf[:PUPPET_FADE_INTERVAL] = 1 # retry every ## seconds to check puppet state if it was running
conf[:PROVISIONING_TIMEOUT] = 90 * 60 # timeout for booting target OS in provision
conf[:REBOOT_TIMEOUT] = 120 # how long it can take for node to reboot
return conf
conf[:REBOOT_TIMEOUT] = 120 # how long it can take for node to reboot
conf[:REDHAT_CHECK_CREDENTIALS_TIMEOUT] = 30 # checking redhat credentials througs mcollective
conf[:REDHAT_GET_LICENSES_POOL_TIMEOUT] = 60 # getting redhat licenses through mcollective
conf
end
end

View File

@ -39,34 +39,26 @@ module Astute
raise "Method #{method} is not implemented for #{self.class}"
end
# Prepare Puppet attributes for single-node deployment mode: the only
# node is the controller, so publish its management/public IPs (with
# any CIDR suffix stripped) as the controller addresses.
def attrs_singlenode(nodes, attrs)
  node = nodes.first
  mgmt_iface = node['network_data'].find { |nd| nd['name'] == 'management' }
  pub_iface  = node['network_data'].find { |nd| nd['name'] == 'public' }
  attrs['controller_node_address'] = mgmt_iface['ip'].split('/').first
  attrs['controller_node_public']  = pub_iface['ip'].split('/').first
  attrs
end
# Deploy single-node mode: all OpenStack services live on one box, so a
# single deploy_piece pass over the node list (default retries) suffices.
def deploy_singlenode(nodes, attrs)
  # TODO(mihgen) some real stuff is needed
  Astute.logger.info "Starting deployment of single node OpenStack"
  deploy_piece(nodes, attrs)
end
# we mix all attrs and prepare them for Puppet
# Works for multinode deployment mode
def attrs_multinode(nodes, attrs)
ctrl_nodes = attrs['controller_nodes']
# TODO(mihgen): we should report error back if there are not enough metadata passed
ctrl_management_ips = []
ctrl_public_ips = []
ctrl_nodes.each do |n|
ctrl_management_ips << n['network_data'].select {|nd| nd['name'] == 'management'}[0]['ip']
ctrl_public_ips << n['network_data'].select {|nd| nd['name'] == 'public'}[0]['ip']
attrs['nodes'] = nodes.map do |n|
{
'fqdn' => n['fqdn'],
'name' => n['fqdn'].split(/\./)[0],
'role' => n['role'],
'internal_address' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'management')}[0]['ip'].split(/\//)[0],
'internal_br' => n['internal_br'],
'internal_netmask' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'management')}[0]['netmask'],
'storage_address' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'storage')}[0]['ip'].split(/\//)[0],
'storage_netmask' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'storage')}[0]['netmask'],
'public_address' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'public')}[0]['ip'].split(/\//)[0],
'public_br' => n['public_br'],
'public_netmask' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'public')}[0]['netmask'],
'default_gateway' => n['default_gateway']
}
end
attrs['controller_node_address'] = ctrl_management_ips[0].split('/')[0]
attrs['controller_node_public'] = ctrl_public_ips[0].split('/')[0]
# TODO(mihgen): we should report error back if there are not enough metadata passed
attrs
end
@ -74,62 +66,92 @@ module Astute
# It should not contain any magic with attributes, and should not directly run any type of MC plugins
# It does only support of deployment sequence. See deploy_piece implementation in subclasses.
def deploy_multinode(nodes, attrs)
deploy_ha_full(nodes, attrs)
ctrl_nodes = nodes.select {|n| n['role'] == 'controller'}
other_nodes = nodes - ctrl_nodes
Astute.logger.info "Starting deployment of primary controller"
deploy_piece(ctrl_nodes, attrs)
Astute.logger.info "Starting deployment of other nodes"
deploy_piece(other_nodes, attrs)
return
end
def attrs_ha(nodes, attrs)
# TODO(mihgen): we should report error back if there are not enough metadata passed
ctrl_nodes = attrs['controller_nodes']
ctrl_manag_addrs = {}
ctrl_public_addrs = {}
ctrl_storage_addrs = {}
ctrl_nodes.each do |n|
# current puppet modules require `hostname -s`
hostname = n['fqdn'].split(/\./)[0]
ctrl_manag_addrs.merge!({hostname =>
n['network_data'].select {|nd| nd['name'] == 'management'}[0]['ip'].split(/\//)[0]})
ctrl_public_addrs.merge!({hostname =>
n['network_data'].select {|nd| nd['name'] == 'public'}[0]['ip'].split(/\//)[0]})
ctrl_storage_addrs.merge!({hostname =>
n['network_data'].select {|nd| nd['name'] == 'storage'}[0]['ip'].split(/\//)[0]})
# we use the same set of mount points for all storage nodes
attrs['mp'] = [{'point' => '1', 'weight' => '1'},{'point'=>'2','weight'=>'2'}]
mountpoints = ""
attrs['mp'].each do |mountpoint|
mountpoints << "#{mountpoint['point']} #{mountpoint['weight']}\n"
end
attrs['nodes'] = ctrl_nodes.map do |n|
Astute.logger.debug("#{nodes}")
attrs['nodes'] = nodes.map do |n|
{
'fqdn' => n['fqdn'],
'name' => n['fqdn'].split(/\./)[0],
'role' => 'controller',
'internal_address' => n['network_data'].select {|nd| nd['name'] == 'management'}[0]['ip'].split(/\//)[0],
'public_address' => n['network_data'].select {|nd| nd['name'] == 'public'}[0]['ip'].split(/\//)[0],
'mountpoints' => "1 1\n2 2",
'zone' => n['id'],
'storage_local_net_ip' => n['network_data'].select {|nd| nd['name'] == 'storage'}[0]['ip'].split(/\//)[0],
'role' => n['role'],
'mountpoints' => mountpoints,
'internal_address' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'management')}[0]['ip'].split(/\//)[0],
'internal_br' => n['internal_br'],
'internal_netmask' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'management')}[0]['netmask'],
'public_address' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'public')}[0]['ip'].split(/\//)[0],
'public_br' => n['public_br'],
'public_netmask' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'public')}[0]['netmask'],
'swift_zone' => n['id'],
'storage_address' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'storage')}[0]['ip'].split(/\//)[0],
'storage_netmask' => n['network_data'].select {|nd| select_ifaces(nd['name'], 'storage')}[0]['netmask'],
'default_gateway' => n['default_gateway']
}
end
attrs['nodes'].first['role'] = 'primary-controller'
attrs['ctrl_hostnames'] = ctrl_nodes.map {|n| n['fqdn'].split(/\./)[0]}
attrs['master_hostname'] = ctrl_nodes[0]['fqdn'].split(/\./)[0]
attrs['ctrl_public_addresses'] = ctrl_public_addrs
attrs['ctrl_management_addresses'] = ctrl_manag_addrs
attrs['ctrl_storage_addresses'] = ctrl_storage_addrs
ctrl_nodes = attrs['nodes'].select {|n| n['role'] == 'controller'}
if attrs['nodes'].select { |node| node['role'] == 'primary-controller' }.empty?
ctrl_nodes[0]['role'] = 'primary-controller'
end
attrs['last_controller'] = ctrl_nodes.last['name']
attrs
end
def deploy_ha(nodes, attrs)
deploy_ha_full(nodes, attrs)
end
def deploy_ha_compact(nodes, attrs)
deploy_ha_full(nodes, attrs)
end
alias :attrs_ha_full :attrs_ha
alias :attrs_ha_compact :attrs_ha
def deploy_ha_full(nodes, attrs)
primary_ctrl_nodes = nodes.select {|n| n['role'] == 'primary-controller'}
ctrl_nodes = nodes.select {|n| n['role'] == 'controller'}
unless primary_ctrl_nodes.any?
if ctrl_nodes.size > 1
primary_ctrl_nodes = [ctrl_nodes.shift]
end
end
compute_nodes = nodes.select {|n| n['role'] == 'compute'}
quantum_nodes = nodes.select {|n| n['role'] == 'quantum'}
storage_nodes = nodes.select {|n| n['role'] == 'storage'}
proxy_nodes = nodes.select {|n| n['role'] == 'swift-proxy'}
primary_proxy_nodes = nodes.select {|n| n['role'] == 'primary-swift-proxy'}
other_nodes = nodes - ctrl_nodes - primary_ctrl_nodes - \
primary_proxy_nodes - quantum_nodes - storage_nodes - proxy_nodes
Astute.logger.info "Starting deployment of primary swift proxy"
deploy_piece(primary_proxy_nodes, attrs)
Astute.logger.info "Starting deployment of non-primary swift proxies"
deploy_piece(proxy_nodes, attrs)
Astute.logger.info "Starting deployment of swift storages"
deploy_piece(storage_nodes, attrs)
Astute.logger.info "Starting deployment of primary controller"
deploy_piece(primary_ctrl_nodes, attrs)
Astute.logger.info "Starting deployment of all controllers one by one"
ctrl_nodes.each {|n| deploy_piece([n], attrs)}
Astute.logger.info "Starting deployment of other nodes"
deploy_piece(other_nodes, attrs)
return
end
def deploy_ha_compact(nodes, attrs)
primary_ctrl_nodes = nodes.select {|n| n['role'] == 'primary-controller'}
ctrl_nodes = nodes.select {|n| n['role'] == 'controller'}
compute_nodes = nodes.select {|n| n['role'] == 'compute'}
quantum_nodes = nodes.select {|n| n['role'] == 'quantum'}
storage_nodes = nodes.select {|n| n['role'] == 'storage'}
@ -139,23 +161,37 @@ module Astute
primary_proxy_nodes - quantum_nodes
Astute.logger.info "Starting deployment of primary controller"
deploy_piece(primary_ctrl_nodes, attrs, 0, false)
deploy_piece(primary_ctrl_nodes, attrs)
Astute.logger.info "Starting deployment of all controllers one by one"
ctrl_nodes.each {|n| deploy_piece([n], attrs)}
Astute.logger.info "Starting deployment of 1st controller and 1st proxy"
deploy_piece(primary_ctrl_nodes + primary_proxy_nodes, attrs)
Astute.logger.info "Starting deployment of quantum nodes"
deploy_piece(quantum_nodes, attrs)
Astute.logger.info "Starting deployment of other nodes"
deploy_piece(other_nodes, attrs)
return
end
alias :deploy_ha :deploy_ha_compact
# rpmcache mode needs no attribute post-processing: the deployment
# attributes are handed to Puppet exactly as received.
def attrs_rpmcache(nodes, attrs)
  attrs
end
# Deploy the rpmcache role (release package download) in a single
# deploy_piece pass with no retries (retries=0, vs. the default of 2).
def deploy_rpmcache(nodes, attrs)
  Astute.logger.info "Starting release downloading"
  deploy_piece(nodes, attrs, 0)
end
private
# Predicate used when filtering a node's network_data entries: an
# entry's 'name' field may be a single String or an Array of names
# (one interface serving several logical networks, e.g.
# ['management', 'storage']). Returns true when +name+ equals, or is
# contained in, +var+.
#
# Fix: the previous version declared a dead local (`result = false`
# that was never read) and returned nil — the value of a failed
# modifier-if — on every no-match path. Now always returns an explicit
# boolean; still falsy in exactly the same cases, so select-style
# callers are unaffected.
def select_ifaces(var, name)
  case var
  when Array then var.include?(name)
  when String then var == name
  else false
  end
end
# Build a status-report payload for the reporter: one entry per node
# carrying its uid, the given status, and any extra fields (e.g.
# 'progress', 'error_type') merged in from +data_to_merge+.
def nodes_status(nodes, status, data_to_merge)
  entries = nodes.map do |node|
    {'uid' => node['uid'], 'status' => status}.merge(data_to_merge)
  end
  {'nodes' => entries}
end

View File

@ -16,6 +16,18 @@
class Astute::DeploymentEngine::NailyFact < Astute::DeploymentEngine
def deploy(nodes, attrs)
# Convert multi roles node to separate one role nodes
nodes.each do |node|
next unless node['role'].is_a?(Array)
node['role'].each do |role|
new_node = deep_copy(node)
new_node['role'] = role
nodes << new_node
end
nodes.delete(node)
end
attrs_for_mode = self.send("attrs_#{attrs['deployment_mode']}", nodes, attrs)
super(nodes, attrs_for_mode)
end
@ -27,21 +39,12 @@ class Astute::DeploymentEngine::NailyFact < Astute::DeploymentEngine
node_network_data = node['network_data'].nil? ? [] : node['network_data']
interfaces = node['meta']['interfaces']
network_data_puppet = calculate_networks(node_network_data, interfaces)
metadata = {
attrs_to_puppet = {
'role' => node['role'],
'uid' => node['uid'],
'network_data' => network_data_puppet.to_json
}
attrs.each do |k, v|
if v.is_a? String
metadata[k] = v
else
# And it's the problem on the puppet side now to decode json
metadata[k] = v.to_json
end
end
# Let's calculate interface settings we need for OpenStack:
node_network_data.each do |iface|
device = if iface['vlan'] && iface['vlan'] > 0
@ -49,38 +52,72 @@ class Astute::DeploymentEngine::NailyFact < Astute::DeploymentEngine
else
iface['dev']
end
metadata["#{iface['name']}_interface"] = device
if iface['ip']
metadata["#{iface['name']}_address"] = iface['ip'].split('/')[0]
if iface['name'].is_a?(String)
attrs_to_puppet["#{iface['name']}_interface"] = device
elsif iface['name'].is_a?(Array)
iface['name'].each do |name|
attrs_to_puppet["#{name}_interface"] = device
end
end
end
# internal_address is required for HA..
metadata['internal_address'] = node['network_data'].select{|nd| nd['name'] == 'management' }[0]['ip'].split('/')[0]
if attrs['novanetwork_parameters'] && \
attrs['novanetwork_parameters']['network_manager'] == 'VlanManager' && \
!attrs_to_puppet['fixed_interface']
if metadata['network_manager'] == 'VlanManager' && !metadata['fixed_interface']
metadata['fixed_interface'] = get_fixed_interface(node)
attrs_to_puppet['fixed_interface'] = get_fixed_interface(node)
end
Astute::Metadata.publish_facts(@ctx, node['uid'], metadata)
attrs_to_puppet.merge!(deep_copy(attrs))
attrs_to_puppet.each do |k, v|
unless v.is_a?(String) || v.is_a?(Integer)
attrs_to_puppet[k] = v.to_json
end
end
attrs_to_puppet
end
def deploy_piece(nodes, attrs, retries=2, change_node_status=true)
return false unless validate_nodes(nodes)
@ctx.reporter.report nodes_status(nodes, 'deploying', {'progress' => 0})
nodes_to_deploy = get_nodes_to_deploy(nodes)
if nodes_to_deploy.empty?
Astute.logger.info "#{@ctx.task_id}: Returning from deployment stage. No nodes to deploy"
return
end
Astute.logger.info "#{@ctx.task_id}: Calculation of required attributes to pass, include netw.settings"
nodes.each do |node|
create_facts(node, attrs)
@ctx.reporter.report(nodes_status(nodes_to_deploy, 'deploying', {'progress' => 0}))
nodes_to_deploy.each do |node|
node['facts'] ||= create_facts(node, attrs)
Astute::Metadata.publish_facts(@ctx, node['uid'], node['facts'])
end
Astute.logger.info "#{@ctx.task_id}: All required attrs/metadata passed via facts extension. Starting deployment."
Astute::PuppetdDeployer.deploy(@ctx, nodes, retries, change_node_status)
nodes_roles = nodes.map { |n| { n['uid'] => n['role'] } }
Astute::PuppetdDeployer.deploy(@ctx, nodes_to_deploy, retries, change_node_status)
nodes_roles = nodes_to_deploy.map { |n| { n['uid'] => n['role'] } }
Astute.logger.info "#{@ctx.task_id}: Finished deployment of nodes => roles: #{nodes_roles.inspect}"
end
private
def get_nodes_to_deploy(nodes)
Astute.logger.info "#{@ctx.task_id}: Getting which nodes to deploy"
nodes_to_deploy = []
nodes.each do |node|
if node['status'] != 'ready'
nodes_to_deploy << node
else
Astute.logger.info "#{@ctx.task_id}: Not adding node #{node['uid']} with hostname #{node['name']} as it does not require deploying."
end
end
nodes_to_deploy
end
def get_fixed_interface(node)
return node['vlan_interface'] if node['vlan_interface']

View File

@ -0,0 +1,18 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Returns a deep copy of +data+ via a Marshal round-trip: the copy shares no
# mutable state with the original at any nesting depth. Works only for
# Marshal-serializable values (plain hashes/arrays/strings/numbers, as used
# for node and attribute structures here).
def deep_copy(data)
  serialized = Marshal.dump(data)
  Marshal.load(serialized)
end

View File

@ -0,0 +1,20 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Convenience helper used throughout Astute's logging: renders an exception's
# backtrace as one frame per line instead of Array#inspect noise.
class Exception
  # Returns the backtrace joined with newlines.
  #
  # Fix: Exception#backtrace is nil until the exception has actually been
  # raised, so the original crashed with NoMethodError on a freshly
  # constructed exception. Array(nil) == [], so this now yields "" instead.
  def format_backtrace
    Array(backtrace).join("\n")
  end
end

View File

@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -13,7 +15,8 @@
# under the License.
# -*- coding: utf-8 -*-
require 'erb'
module Astute
module LogParser
LOG_PORTION = 10000
@ -35,6 +38,93 @@ module Astute
end
end
class DirSizeCalculation
attr_reader :nodes
def initialize(nodes)
@nodes = nodes.map{|n| n.dup}
@nodes.each{|node| node[:path_items] = weight_reassignment(node[:path_items])}
end
def deploy_type=(*args)
# Because we mimic the DeploymentParser, we should define all auxiliary method
# even they do nothing.
end
def progress_calculate(uids_to_calc, nodes)
uids_to_calc.map do |uid|
node = @nodes.find{|n| n[:uid] == uid}
node[:path_items] ||= []
progress = 0
node[:path_items].each do |item|
size = recursive_size(item[:path])
sub_progress = 100 * size / item[:max_size]
sub_progress = 0 if sub_progress < 0
sub_progress = 100 if sub_progress > 100
progress += sub_progress * item[:weight]
end
{'uid' => uid, 'progress' => progress.to_i}
end
end
private
def recursive_size(path, opts={})
return File.size?(path).to_i if not File.directory?(path)
total_size = 0
Dir[File.join("#{path}", '**/*')].each do |f|
# Option :files_only used when you want to calculate total size of
# regular files only. The default :files_only is false, so the function will
# include inode size of each dir (4096 bytes in most cases) to total value
# as the unix util 'du' does it.
total_size += File.size?(f).to_i if File.file?(f) || ! opts[:files_only]
end
total_size
end
def weight_reassignment(items)
# The finction normalizes the weights of each item in order to make sum of
# all weights equal to one.
# It divides items as wighted and unweighted depending on the existence of
# the :weight key in the item.
# - Each unweighted item will be weighted as a one N-th part of the total number of items.
# - All weights of weighted items are summed up and then each weighted item
# gets a new weight as a multiplication of a relative weight among all
# weighted items and the ratio of the number of the weighted items to
# the total number of items.
# E.g. we have four items: one with weight 0.5, another with weight 1.5, and
# two others as unweighted. All unweighted items will get the weight 1/4.
# Weight's sum of weighted items is 2. So the first item will get the weight:
# (relative weight 0.5/2) * (weighted items ratio 2/4) = 1/8.
# Finally all items will be normalised with next weights:
# 1/8, 3/8, 1/4, and 1/4.
ret_items = items.reject do |item|
weight = item[:weight]
# Save an item if it unweighted.
next if weight.nil?
raise "Weight should be a non-negative number" unless [Fixnum, Float].include?(weight.class) && weight >= 0
# Drop an item if it weighted as zero.
item[:weight] == 0
end
return [] if ret_items.empty?
ret_items.map!{|n| n.dup}
partial_weight = 1.0 / ret_items.length
weighted_items = ret_items.select{|n| n[:weight]}
weighted_sum = 0.0
weighted_items.each{|n| weighted_sum += n[:weight]}
weighted_sum = weighted_sum * ret_items.length / weighted_items.length if weighted_items.any?
raise "Unexpectedly a summary weight of weighted items is a non-positive" if weighted_items.any? && weighted_sum <= 0
ret_items.each do |item|
weight = item[:weight]
item[:weight] = weight ? weight / weighted_sum : partial_weight
end
ret_items
end
end
class ParseNodeLogs
attr_reader :pattern_spec
@ -54,12 +144,12 @@ module Astute
node_pattern_spec = Marshal.load(Marshal.dump(@pattern_spec))
@nodes_states[uid] = node_pattern_spec
end
path = "#{@pattern_spec['path_prefix']}#{node['fqdn']}/#{@pattern_spec['filename']}"
erb_path = @pattern_spec['path_format']
path = ERB.new(erb_path).result(binding())
begin
progress = (get_log_progress(path, node_pattern_spec)*100).to_i # Return percent of progress
rescue Exception => e
Astute.logger.warn "Some error occurred when calculate progress for node '#{uid}': #{e.message}, trace: #{e.backtrace.inspect}"
Astute.logger.warn "Some error occurred when calculate progress for node '#{uid}': #{e.message}, trace: #{e.format_backtrace}"
progress = 0
end

View File

@ -45,7 +45,8 @@ module Astute
{'pattern' => 'leaving (1) step postscripts', 'supposed_time' => 130},
{'pattern' => 'wait while node rebooting', 'supposed_time' => 20},
].reverse,
'filename' => 'install/anaconda.log'
'filename' => 'install/anaconda.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
},
'anaconda-log-supposed-time-kvm' => # key for default kvm provision pattern
@ -68,7 +69,8 @@ module Astute
{'pattern' => 'leaving (1) step postscripts', 'supposed_time' => 200},
{'pattern' => 'wait while node rebooting', 'supposed_time' => 20},
].reverse,
'filename' => 'install/anaconda.log'
'filename' => 'install/anaconda.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
},
'puppet-log-components-list-ha-controller' => # key for default HA deploy pattern
@ -76,6 +78,7 @@ module Astute
'endlog_patterns' => [{'pattern' => /Finished catalog run in [0-9]+\.[0-9]* seconds\n/, 'progress' => 1.0}],
'chunk_size' => 40000,
'filename' => 'puppet-agent.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
'components_list' => [
{'name' => 'Galera', 'weight' => 5, 'patterns' => [
{'pattern' => '/Stage[main]/Galera/File[/etc/mysql]/ensure) created', 'progress' => 0.1},
@ -240,6 +243,7 @@ module Astute
'endlog_patterns' => [{'pattern' => /Finished catalog run in [0-9]+\.[0-9]* seconds\n/, 'progress' => 1.0}],
'chunk_size' => 40000,
'filename' => 'puppet-agent.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
'components_list' => [
{'name' => 'Keystone', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Keystone::Python/Package[python-keystone]/ensure) created', 'progress' => 1},
@ -292,6 +296,7 @@ module Astute
'endlog_patterns' => [{'pattern' => /Finished catalog run in [0-9]+\.[0-9]* seconds\n/, 'progress' => 1.0}],
'chunk_size' => 40000,
'filename' => 'puppet-agent.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
'components_list' => [
{'name' => 'Glance', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Glance/Package[glance]/ensure) created', 'progress' => 0.1},
@ -378,6 +383,7 @@ module Astute
'endlog_patterns' => [{'pattern' => /Finished catalog run in [0-9]+\.[0-9]* seconds\n/, 'progress' => 1.0}],
'chunk_size' => 40000,
'filename' => 'puppet-agent.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
'components_list' => [
{'name' => 'Glance', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Glance/Package[glance]/ensure) created', 'progress' => 0.1},
@ -464,6 +470,7 @@ module Astute
'endlog_patterns' => [{'pattern' => /Finished catalog run in [0-9]+\.[0-9]* seconds\n/, 'progress' => 1.0}],
'chunk_size' => 40000,
'filename' => 'puppet-agent.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
'components_list' => [
{'name' => 'Keystone', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Keystone::Python/Package[python-keystone]/ensure) created', 'progress' => 1},

View File

@ -54,7 +54,7 @@ module Astute
pattern_spec['path_prefix'] ||= PATH_PREFIX.to_s
pattern_spec['separator'] ||= SEPARATOR.to_s
hdd = node['meta']['disks'].select{|disk| not disk['removable']}[0]
hdd = node['meta']['disks'].select{|disk| not disk['removable']}[0] rescue nil
if hdd
# Convert size from bytes to GB
hdd_size = hdd['size'] / 10 ** 9

View File

@ -16,7 +16,7 @@ module Astute
class Orchestrator
def initialize(deploy_engine=nil, log_parsing=false)
@deploy_engine = deploy_engine || Astute::DeploymentEngine::NailyFact
@log_parser = log_parsing ? LogParser::ParseDeployLogs.new : LogParser::NoParsing.new
@log_parsing = log_parsing
end
def node_type(reporter, task_id, nodes, timeout=nil)
@ -36,28 +36,29 @@ module Astute
raise "Nodes to deploy are not provided!" if nodes.empty?
# Following line fixes issues with uids: it should always be string
nodes.map { |x| x['uid'] = x['uid'].to_s } # NOTE: perform that on environment['nodes'] initialization
proxy_reporter = ProxyReporter.new(up_reporter)
context = Context.new(task_id, proxy_reporter, @log_parser)
proxy_reporter = ProxyReporter::DeploymentProxyReporter.new(up_reporter)
log_parser = @log_parsing ? LogParser::ParseDeployLogs.new : LogParser::NoParsing.new
context = Context.new(task_id, proxy_reporter, log_parser)
deploy_engine_instance = @deploy_engine.new(context)
Astute.logger.info "Using #{deploy_engine_instance.class} for deployment."
begin
@log_parser.prepare(nodes)
log_parser.prepare(nodes)
rescue Exception => e
Astute.logger.warn "Some error occurred when prepare LogParser: #{e.message}, trace: #{e.backtrace.inspect}"
Astute.logger.warn "Some error occurred when prepare LogParser: #{e.message}, trace: #{e.format_backtrace}"
end
deploy_engine_instance.deploy(nodes, attrs)
return SUCCESS
end
def fast_provision(reporter, engine_attrs, nodes)
raise "Nodes to provision are not provided!" if nodes.empty?
engine = create_engine(engine_attrs, reporter)
begin
reboot_events = reboot_nodes(engine, nodes)
failed_nodes = check_reboot_nodes(engine, reboot_events)
rescue RuntimeError => e
Astute.logger.error("Error occured while provisioning: #{e.inspect}")
reporter.report({
@ -69,7 +70,7 @@ module Astute
ensure
engine.sync
end
if failed_nodes.empty?
report_result({}, reporter)
return SUCCESS
@ -83,23 +84,29 @@ module Astute
raise StopIteration
end
end
def provision(reporter, task_id, nodes)
raise "Nodes to provision are not provided!" if nodes.empty?
def provision(reporter, task_id, nodes_up)
raise "Nodes to provision are not provided!" if nodes_up.empty?
# We need only those which are not ready/provisioned yet
nodes = []
nodes_up.each do |n|
nodes << n unless ['provisioned', 'ready'].include?(n['status'])
end
# Following line fixes issues with uids: it should always be string
nodes.map { |x| x['uid'] = x['uid'].to_s } # NOTE: perform that on environment['nodes'] initialization
nodes_uids = nodes.map { |n| n['uid'] }
provisionLogParser = LogParser::ParseProvisionLogs.new
proxy_reporter = ProxyReporter.new(reporter)
provisionLogParser = @log_parsing ? LogParser::ParseProvisionLogs.new : LogParser::NoParsing.new
proxy_reporter = ProxyReporter::DeploymentProxyReporter.new(reporter)
sleep_not_greater_than(10) do # Wait while nodes going to reboot
Astute.logger.info "Starting OS provisioning for nodes: #{nodes_uids.join(',')}"
begin
provisionLogParser.prepare(nodes)
rescue => e
Astute.logger.warn "Some error occurred when prepare LogParser: #{e.message}, trace: #{e.backtrace.inspect}"
Astute.logger.warn "Some error occurred when prepare LogParser: #{e.message}, trace: #{e.format_backtrace}"
end
end
nodes_not_booted = nodes_uids.clone
@ -107,15 +114,15 @@ module Astute
Timeout.timeout(Astute.config.PROVISIONING_TIMEOUT) do # Timeout for booting target OS
catch :done do
while true
sleep_not_greater_than(5) do
sleep_not_greater_than(5) do
types = node_type(proxy_reporter, task_id, nodes, 2)
types.each { |t| Astute.logger.debug("Got node types: uid=#{t['uid']} type=#{t['node_type']}") }
Astute.logger.debug("Not target nodes will be rejected")
target_uids = types.reject{|n| n['node_type'] != 'target'}.map{|n| n['uid']}
nodes_not_booted -= types.map { |n| n['uid'] }
Astute.logger.debug "Not provisioned: #{nodes_not_booted.join(',')}, got target OSes: #{target_uids.join(',')}"
if nodes.length == target_uids.length
Astute.logger.info "All nodes #{target_uids.join(',')} are provisioned."
throw :done
@ -123,11 +130,11 @@ module Astute
Astute.logger.debug("Nodes list length is not equal to target nodes list length: #{nodes.length} != #{target_uids.length}")
end
report_about_progress(proxy_reporter, provisionLogParser, nodes_uids, target_uids, nodes)
report_about_progress(proxy_reporter, provisionLogParser, nodes_uids, target_uids, nodes)
end
end
end
# We are here if jumped by throw from while cycle
# We are here if jumped by throw from while cycle
end
rescue Timeout::Error
msg = "Timeout of provisioning is exceeded."
@ -147,7 +154,6 @@ module Astute
proxy_reporter.report({'nodes' => nodes_progress})
return SUCCESS
end
def remove_nodes(reporter, task_id, nodes)
NodesRemover.new(Context.new(task_id, reporter), nodes).remove
@ -156,31 +162,89 @@ module Astute
def verify_networks(reporter, task_id, nodes)
Network.check_network(Context.new(task_id, reporter), nodes)
end
def download_release(up_reporter, task_id, release_info)
raise "Release information not provided!" if release_info.empty?
attrs = {'deployment_mode' => 'rpmcache',
'deployment_id' => 'rpmcache'}
facts = {'rh_username' => release_info['username'],
'rh_password' => release_info['password']}
facts.merge!(attrs)
if release_info['license_type'] == 'rhn'
facts.merge!(
{'use_satellite' => 'true',
'sat_hostname' => release_info['satellite'],
'activation_key' => release_info['activation_key']})
end
nodes = [{'uid' => 'master', 'facts' => facts}]
proxy_reporter = ProxyReporter::DLReleaseProxyReporter.new(up_reporter, nodes.size)
#FIXME: These parameters should be propagated from Nailgun. Maybe they should be saved
# in Release.json.
nodes_to_parser = [
{:uid => 'master',
:path_items => [
{:max_size => 1111280705, :path => '/var/www/nailgun/rhel', :weight => 3},
{:max_size => 195900000, :path => '/var/cache/yum/x86_64/6Server', :weight => 1},
]}
]
log_parser = @log_parsing ? LogParser::DirSizeCalculation.new(nodes_to_parser) : LogParser::NoParsing.new
context = Context.new(task_id, proxy_reporter, log_parser)
deploy_engine_instance = @deploy_engine.new(context)
Astute.logger.info "Using #{deploy_engine_instance.class} for release download."
deploy_engine_instance.deploy(nodes, attrs)
proxy_reporter.report({'status' => 'ready', 'progress' => 100})
end
def check_redhat_credentials(reporter, task_id, credentials)
ctx = Context.new(task_id, reporter)
begin
Astute::RedhatChecker.new(ctx, credentials).check_redhat_credentials
rescue Astute::RedhatCheckingError => e
Astute.logger.error("Error #{e.message}")
raise StopIteration
rescue Exception => e
Astute.logger.error("Unexpected error #{e.message} traceback #{e.format_backtrace}")
raise StopIteration
end
end
def check_redhat_licenses(reporter, task_id, credentials, nodes=nil)
ctx = Context.new(task_id, reporter)
begin
Astute::RedhatChecker.new(ctx, credentials).check_redhat_licenses(nodes)
rescue Astute::RedhatCheckingError => e
Astute.logger.error("Error #{e.message}")
raise StopIteration
rescue Exception => e
Astute.logger.error("Unexpected error #{e.message} traceback #{e.format_backtrace}")
raise StopIteration
end
end
private
def report_result(result, reporter)
default_result = {'status' => 'ready', 'progress' => 100}
result = {} unless result.instance_of?(Hash)
status = default_result.merge(result)
reporter.report(status)
end
def sleep_not_greater_than(sleep_time, &block)
time = Time.now.to_f
block.call
time = time + sleep_time - Time.now.to_f
sleep (time) if time > 0
end
def create_engine(engine_attrs, reporter)
begin
Astute.logger.info("Trying to instantiate cobbler engine: #{engine_attrs.inspect}")
Astute::Provision::Cobbler.new(engine_attrs)
rescue
Astute.logger.error("Error occured during cobbler initializing")
reporter.report({
'status' => 'error',
'error' => 'Cobbler can not be initialized',
@ -189,7 +253,7 @@ module Astute
raise StopIteration
end
end
def reboot_nodes(engine, nodes)
reboot_events = {}
nodes.each do |node|
@ -206,7 +270,7 @@ module Astute
end
reboot_events
end
def check_reboot_nodes(engine, reboot_events)
begin
Astute.logger.debug("Waiting for reboot to be complete: nodes: #{reboot_events.keys}")
@ -234,7 +298,7 @@ module Astute
end
failed_nodes
end
def report_about_progress(reporter, provisionLogParser, nodes_uids, target_uids, nodes)
begin
nodes_progress = provisionLogParser.progress_calculate(nodes_uids, nodes)
@ -248,9 +312,9 @@ module Astute
end
reporter.report({'nodes' => nodes_progress})
rescue => e
Astute.logger.warn "Some error occurred when parse logs for nodes progress: #{e.message}, trace: #{e.backtrace.inspect}"
Astute.logger.warn "Some error occurred when parse logs for nodes progress: #{e.message}, trace: #{e.format_backtrace}"
end
end
end
end

View File

@ -148,7 +148,7 @@ module Astute
end
rescue Exception => e
Astute.logger.warn "Some error occurred when parse logs for nodes progress: #{e.message}, "\
"trace: #{e.backtrace.inspect}"
"trace: #{e.format_backtrace}"
end
end
ctx.reporter.report('nodes' => nodes_to_report) if nodes_to_report.any?

View File

@ -0,0 +1,168 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
module Astute
class RedhatCheckingError < Exception; end
class RedhatChecker
def initialize(ctx, credentials)
@ctx = ctx
@username = Shellwords.escape(credentials['redhat']['username'])
@password = Shellwords.escape(credentials['redhat']['password'])
satellite = credentials['redhat']['satellite']
if credentials['redhat']['license_type'] == 'rhn' && satellite && !satellite.empty?
@satellite_server = Shellwords.escape(satellite)
end
release_name = credentials['release_name']
@network_error = 'Unable to reach host cdn.redhat.com. ' + \
'Please check your Internet connection.'
@user_does_not_have_licenses = 'Could not find any valid Red Hat ' + \
'OpenStack subscriptions. Contact your Red Hat sales representative ' + \
'to get the proper subscriptions associated with your account: '+ \
'https://access.redhat.com/site/solutions/368643. If you are still ' + \
'encountering issues, contact Mirantis Support.'
@not_enough_licenses = "Your account has only %d licenses " + \
'available to deploy Red Hat OpenStack. Contact your Red Hat sales ' + \
'representative to get the proper subscriptions associated with your ' + \
'account. https://access.redhat.com/site/solutions/368643'
@check_credentials_success = "Account information for #{release_name} " + \
'has been successfully modified.'
@satellite_error = 'Unable to communicate with RHN Satellite Server. ' + \
'Please check host and try again.'
@common_errors = {
/^Network error|^Remote server error/ => @network_error,
/The requested URL returned error|Couldn't resolve host|couldn't connect to host/ => @satellite_error,
/^Invalid username or password/ => 'Invalid username or password. ' + \
'To create a login, please visit https://www.redhat.com/wapps/ugc/register.html'
}
end
# Checking redhat credentials and satellite server
def check_redhat_credentials
timeout = Astute.config[:REDHAT_CHECK_CREDENTIALS_TIMEOUT]
check_credentials_cmd = 'subscription-manager orgs ' + \
"--username #{@username} " + \
"--password #{@password}"
# check user/password
response = exec_cmd_with_timeout(check_credentials_cmd, timeout, @network_error)
# checking user/password is succeed, than try to check satellite server if it set
if @satellite_server && !contain_errors?(response.results[:data])
check_server_satellite_cmd = 'curl -k -f -L -v --silent -o /dev/null ' + \
"http://#{@satellite_server}/pub/RHN-ORG-TRUSTED-SSL-CERT"
response = exec_cmd_with_timeout(check_server_satellite_cmd, timeout, @satellite_error)
end
report(response.results[:data], @check_credentials_success)
end
# Check redhat linceses and return message, if not enough licenses
def check_redhat_licenses(nodes=nil)
timeout = Astute.config[:REDHAT_GET_LICENSES_POOL_TIMEOUT]
get_redhat_licenses_cmd = 'get_redhat_licenses ' + \
"#{@username} " + \
"#{@password}"
response = exec_cmd_with_timeout(get_redhat_licenses_cmd, timeout, @network_error)
licenses_count = nil
begin
licenses_pool = JSON.load(response.results[:data][:stdout])
licenses_count = licenses_pool['openstack_licenses_physical_hosts_count']
rescue JSON::ParserError
report(response.results[:data])
return
end
if licenses_count <= 0
report_error(@user_does_not_have_licenses)
elsif nodes && licenses_count < nodes.count
report_success(format(@not_enough_licenses, licenses_count))
else
report_success
end
end
private
def report(result, success_msg=nil)
stdout = result[:stdout]
stderr = result[:stderr]
exit_code = result[:exit_code]
if contain_errors?(result)
err_msg = "Unknown error Stdout: #{stdout} Stderr: #{stderr}"
error = get_error(result) || err_msg
Astute.logger.error(err_msg)
report_error(error)
else
report_success(success_msg)
end
end
def contain_errors?(data)
get_error(data) || data[:exit_code] != 0
end
def get_error(result)
@common_errors.each_pair do |regex, msg|
return msg if regex.match(result[:stdout])
return msg if regex.match(result[:stderr])
end
nil
end
def report_success(msg=nil)
success_msg = {'status' => 'ready', 'progress' => 100}
success_msg.merge!({'msg' => msg}) if msg
@ctx.reporter.report(success_msg)
end
# Report error and raise exception
def report_error(msg)
@ctx.reporter.report({'status' => 'error', 'error' => msg, 'progress' => 100})
raise RedhatCheckingError.new(msg)
end
def exec_cmd_with_timeout(cmd, timeout, timeout_expired_msg)
shell = MClient.new(@ctx, 'execute_shell_command', ['master'])
begin
Timeout.timeout(timeout) do
response = shell.execute(:cmd => cmd).first
report_error(timeout_expired_msg) unless response
return response
end
rescue Timeout::Error
Astute.logger.warn("Time out error for shell command '#{cmd}'")
report_error(timeout_expired_msg)
end
end
end
end

View File

@ -26,25 +26,54 @@ STATES = {
}
module Astute
class ProxyReporter
def initialize(up_reporter)
@up_reporter = up_reporter
@nodes = []
end
def report(data)
nodes_to_report = []
nodes = (data['nodes'] or [])
nodes.each do |node|
node = node_validate(node)
nodes_to_report << node if node
module ProxyReporter
class DeploymentProxyReporter
def initialize(up_reporter)
@up_reporter = up_reporter
@nodes = []
end
# Let's report only if nodes updated
if nodes_to_report.any?
data['nodes'] = nodes_to_report
def report(data)
Astute.logger.debug("Data received by DeploymetProxyReporter to report it up: #{data.inspect}")
report_new_data(data)
end
private
def report_new_data(data)
if data['nodes']
nodes_to_report = get_nodes_to_report(data['nodes'])
# Let's report only if nodes updated
return if nodes_to_report.empty?
# Update nodes attributes in @nodes.
update_saved_nodes(nodes_to_report)
data['nodes'] = nodes_to_report
end
data.merge!(get_overall_status(data))
@up_reporter.report(data)
end
def get_overall_status(data)
status = data['status']
error_nodes = @nodes.select { |n| n['status'] == 'error' }.map{ |n| n['uid'] }
msg = data['error']
if status == 'ready' && error_nodes.any?
status = 'error'
msg = "Some error occured on nodes #{error_nodes.inspect}"
end
progress = data['progress']
{'status' => status, 'error' => msg, 'progress' => progress}.reject{|k,v| v.nil?}
end
def get_nodes_to_report(nodes)
nodes.map{ |node| node_validate(node) }.compact
end
def update_saved_nodes(new_nodes)
# Update nodes attributes in @nodes.
nodes_to_report.each do |node|
new_nodes.each do |node|
saved_node = @nodes.select {|x| x['uid'] == node['uid']}.first # NOTE: use nodes hash
if saved_node
node.each {|k, v| saved_node[k] = v}
@ -53,72 +82,102 @@ module Astute
end
end
end
end
private
def node_validate(node)
# Validate basic correctness of attributes.
err = []
if node['status']
err << "Status provided #{node['status']} is not supported" unless STATES[node['status']]
elsif node['progress']
err << "progress value provided, but no status"
end
err << "Node uid is not provided" unless node['uid']
if err.any?
msg = "Validation of node: #{node.inspect} for report failed: #{err.join('; ')}."
Astute.logger.error(msg)
raise msg
end
def node_validate(node)
# Validate basic correctness of attributes.
err = []
if node['status'].nil?
err << "progress value provided, but no status" unless node['progress'].nil?
else
err << "Status provided #{node['status']} is not supported" if STATES[node['status']].nil?
end
unless node['uid']
err << "Node uid is not provided"
end
if err.any?
msg = "Validation of node: #{node.inspect} for report failed: #{err.join('; ')}."
Astute.logger.error(msg)
raise msg
end
# Validate progress field.
unless node['progress'].nil?
if node['progress'] > 100
Astute.logger.warn("Passed report for node with progress > 100: "\
"#{node.inspect}. Adjusting progress to 100.")
# Validate progress field.
if node['progress']
if node['progress'] > 100
Astute.logger.warn("Passed report for node with progress > 100: "\
"#{node.inspect}. Adjusting progress to 100.")
node['progress'] = 100
end
if node['progress'] < 0
Astute.logger.warn("Passed report for node with progress < 0: "\
"#{node.inspect}. Adjusting progress to 0.")
node['progress'] = 0
end
end
if node['status'] && ['provisioned', 'ready'].include?(node['status']) && node['progress'] != 100
Astute.logger.warn("In #{node['status']} state node should have progress 100, "\
"but node passed: #{node.inspect}. Setting it to 100")
node['progress'] = 100
end
if node['progress'] < 0
Astute.logger.warn("Passed report for node with progress < 0: "\
"#{node.inspect}. Adjusting progress to 0.")
node['progress'] = 0
end
end
if not node['status'].nil? and ['provisioned', 'ready'].include?(node['status']) and node['progress'] != 100
Astute.logger.warn("In #{node['status']} state node should have progress 100, "\
"but node passed: #{node.inspect}. Setting it to 100")
node['progress'] = 100
end
# Comparison with previous state.
saved_node = @nodes.select {|x| x['uid'] == node['uid']}.first
unless saved_node.nil?
saved_status = (STATES[saved_node['status']] or 0)
node_status = (STATES[node['status']] or saved_status)
saved_progress = (saved_node['progress'] or 0)
node_progress = (node['progress'] or saved_progress)
# Comparison with previous state.
saved_node = @nodes.select {|x| x['uid'] == node['uid']}.first
if saved_node
saved_status = STATES[saved_node['status']].to_i
node_status = STATES[node['status']] || saved_status
saved_progress = saved_node['progress'].to_i
node_progress = node['progress'] || saved_progress
if node_status < saved_status
Astute.logger.warn("Attempt to assign lower status detected: "\
"Status was: #{saved_status}, attempted to "\
"assign: #{node_status}. Skipping this node (id=#{node['uid']})")
return
end
if node_progress < saved_progress and node_status == saved_status
Astute.logger.warn("Attempt to assign lesser progress detected: "\
"Progress was: #{saved_progress}, attempted to "\
"assign: #{node_progress}. Skipping this node (id=#{node['uid']})")
return
if node_status < saved_status
Astute.logger.warn("Attempt to assign lower status detected: "\
"Status was: #{saved_status}, attempted to "\
"assign: #{node_status}. Skipping this node (id=#{node['uid']})")
return
end
if node_progress < saved_progress && node_status == saved_status
Astute.logger.warn("Attempt to assign lesser progress detected: "\
"Progress was: #{saved_progress}, attempted to "\
"assign: #{node_progress}. Skipping this node (id=#{node['uid']})")
return
end
# We need to update node here only if progress is greater, or status changed
return if node.select{|k, v| saved_node[k] != v }.empty?
end
# We need to update node here only if progress is greater, or status changed
return if node.select{|k, v| not saved_node[k].eql?(v)}.empty?
node
end
end
# Proxy reporter used while downloading a release: aggregates per-node
# progress reports coming from below into one overall status/progress
# message for the upstream reporter.
#
# FIX: removed a stray bare `node` expression left at class-body level by a
# merge conflict — evaluating it at class-definition time raises NameError.
class DLReleaseProxyReporter < DeploymentProxyReporter
  # up_reporter - reporter the aggregated messages are forwarded to.
  # amount      - total number of nodes, used to average overall progress.
  def initialize(up_reporter, amount)
    @amount = amount
    super(up_reporter)
  end

  def report(data)
    Astute.logger.debug("Data received by DLReleaseProxyReporter to report it up: #{data.inspect}")
    report_new_data(data)
  end

  private

  # Mean progress across all known nodes (integer division by node amount).
  def calculate_overall_progress
    @nodes.inject(0) { |sum, node| sum + node['progress'].to_i } / @amount
  end

  # Derives the overall message: if any node is in 'error', the whole
  # operation is an error even when the incoming status says 'ready'.
  # nil values are stripped so absent keys are not reported.
  def get_overall_status(data)
    status = data['status']
    error_nodes = @nodes.select { |n| n['status'] == 'error' }.map { |n| n['uid'] }
    msg = data['error']
    err_msg = "Cannot download release on nodes #{error_nodes.inspect}" if error_nodes.any?
    if status == 'error'
      msg ||= err_msg
    elsif status == 'ready' && err_msg
      msg = err_msg
      status = 'error'
    end
    progress = data['progress'] || calculate_overall_progress
    {'status' => status, 'error' => msg, 'progress' => progress}.reject { |k, v| v.nil? }
  end
end
end
end

View File

@ -0,0 +1,30 @@
# MCollective DDL for the execute_shell_command agent: declares the agent
# metadata and the input/output contract of its single "execute" action.
metadata :name => "Execute shell command",
:description => "Execute shell command",
:author => "Mirantis Inc.",
:license => "Apache License 2.0",
:version => "0.0.1",
:url => "http://mirantis.com",
:timeout => 600
# Runs an arbitrary shell command on the node and returns its stdout,
# stderr and exit code.
action "execute", :description => "Execute shell command" do
input :cmd,
:prompt => "Shell command",
:description => "Shell command for running",
:type => :string,
:validation => '.*',
:optional => false,
:maxlength => 0 # NOTE(review): 0 appears to mean "no length limit" — confirm against MCollective DDL docs
# NOTE: "#{:cmd}" interpolates the symbol, producing the literal text "cmd"
# in these descriptions.
output :stdout,
:description => "Output from #{:cmd}",
:display_as => "Output"
output :stderr,
:description => "Stderr from #{:cmd}",
:display_as => "Stderr"
output :exit_code,
:description => "Exit code of #{:cmd}",
:display_as => "Exit code"
end

View File

@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
module MCollective
module Agent
# MCollective agent that executes an arbitrary shell command on the node and
# replies with its stdout, stderr and exit code.
class Execute_shell_command < RPC::Agent
action 'execute' do
reply[:stdout], reply[:stderr], reply[:exit_code] = runcommand(request[:cmd])
end
private
# Runs +cmd+ via systemu, capturing output streams.
# Returns [stdout, stderr, exit_code].
def runcommand(cmd)
# We cannot use Shell from puppet because
# version 2.3.1 has bug, with returning wrong exit
# code in some cases, in newer version mcollective
# it was fixed
# https://github.com/puppetlabs/marionette-collective
# /commit/10f163550bc6395f1594dacb9f15a86d4a3fde27
# So, it's just fixed code from Shell#runcommand
thread = Thread.current
stdout = ''
stderr = ''
status = systemu(cmd, {'stdout' => stdout, 'stderr' => stderr}) do |cid|
begin
# Let the child run while the requesting thread is alive; once the
# thread dies, fall through and reap the child process.
while(thread.alive?)
sleep 0.1
end
Process.waitpid(cid) if Process.getpgid(cid)
rescue SystemExit
rescue Errno::ESRCH
rescue Errno::ECHILD
# NOTE: broad rescue mirrors upstream Shell#runcommand — unexpected
# errors while reaping are only logged, never raised to the caller.
rescue Exception => e
Log.info("Unexpected exception received while waiting for child process: #{e.class}: #{e}")
end
end
[stdout, stderr, status.exitstatus]
end
end
end
end

View File

@ -157,7 +157,7 @@ module MCollective
end
def runonce_background
cmd = [@puppetd, "--onetime", "--logdest", 'syslog']
cmd = [@puppetd, "--onetime", "--ignorecache", "--logdest", 'syslog']
unless request[:forcerun]
if @splaytime && @splaytime > 0

View File

@ -19,6 +19,7 @@ SimpleCov.start
require 'tempfile'
require 'tmpdir'
require 'fileutils'
require 'date'
require 'yaml'
require 'rspec'
@ -26,6 +27,7 @@ require 'rspec'
require 'rspec/autorun'
require File.join(File.dirname(__FILE__), '../lib/astute')
Dir[File.join(File.dirname(__FILE__), 'unit/fixtures/*.rb')].each { |file| require file }
# NOTE(mihgen): I hate to wait for unit tests to complete,
# resetting time to sleep significantly increases tests speed
@ -46,7 +48,10 @@ module SpecHelpers
stubs(:progress=)
unless timeout.nil?
expects(:timeout=).with(timeout)
else
stubs(:timeout=)
end
if discover_nodes.nil?
stubs(:discover)
else
@ -65,4 +70,16 @@ module SpecHelpers
stubs(:agent).returns('mc_stubbed_agent')
end
end
# Builds a stubbed deployment context for specs: task_id is stubbed out,
# deploy_log_parser returns the given parser (NoParsing by default) and
# reporter returns a fresh mock.
def mock_ctx(parser=nil)
  log_parser = parser || Astute::LogParser::NoParsing.new
  mock.tap do |context|
    context.stubs(:task_id)
    context.stubs(:deploy_log_parser).returns(log_parser)
    context.stubs(:reporter).returns(mock)
  end
end
end

View File

@ -0,0 +1,56 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
require File.join(File.dirname(__FILE__), '../spec_helper')
# Specs for DeploymentEngine#attrs_ha: last_controller calculation and
# primary-controller role assignment in HA mode.
describe Astute::DeploymentEngine do
include SpecHelpers
# Minimal concrete subclass — DeploymentEngine itself is abstract-ish.
class Engine < Astute::DeploymentEngine; end
let(:ctx) { mock_ctx }
let(:deployer) { Engine.new(ctx) }
describe '#attrs_ha' do
def only_controllers(nodes)
nodes.select { |node| node['role'] == 'controller' }
end
# last_controller is the short hostname (fqdn up to the first dot) of the
# last controller node.
it 'should set last_controller' do
attrs = deployer.attrs_ha(Fixtures.ha_nodes, {})
attrs['last_controller'].should == only_controllers(Fixtures.ha_nodes).last['fqdn'].split(/\./)[0]
end
it 'should assign primary-controller role for first node if primary-controller not set directly' do
attrs = deployer.attrs_ha(Fixtures.ha_nodes, {})
primary = attrs['nodes'].find { |node| node['role'] == 'primary-controller' }
primary.should_not be_nil
primary['fqdn'].should == only_controllers(Fixtures.ha_nodes).first['fqdn']
end
# An explicitly assigned primary-controller must be kept as the only one.
it 'should not assign primary-controller role for first node if primary-controller set directly' do
nodes = Fixtures.ha_nodes
last_node = only_controllers(nodes).last
last_node['role'] = 'primary-controller'
attrs = deployer.attrs_ha(deep_copy(nodes), {})
primary = attrs['nodes'].select { |node| node['role'] == 'primary-controller' }
primary.length.should == 1
primary[0]['fqdn'].should == last_node['fqdn']
end
end
end

View File

@ -0,0 +1,58 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
module Fixtures
# Canonical "deploy" message fixture: cluster attributes (credentials,
# network ranges), a task uuid, the common node list and its controller
# subset — shaped like the payload Astute receives from Nailgun.
def self.common_attrs
{
"args" => {
"attributes" => {
"storage_network_range" => "172.16.0.0/24",
"auto_assign_floating_ip" => false,
"mysql" => {
"root_password" => "Z2EqsZo5"
},
"keystone" => {
"admin_token" => "5qKy0i63",
"db_password" => "HHQ86Rym",
"admin_tenant" => "admin"
},
"nova" => {
"user_password" => "h8RY8SE7",
"db_password" => "Xl9I51Cb"
},
"glance" => {
"user_password" => "nDlUxuJq",
"db_password" => "V050pQAn"
},
"rabbit" => {
"user" => "nova",
"password" => "FLF3txKC"
},
"management_network_range" => "192.168.0.0/24",
"public_network_range" => "240.0.1.0/24",
"fixed_network_range" => "10.0.0.0/24",
"floating_network_range" => "240.0.0.0/24"
},
"task_uuid" => "19d99029-350a-4c9c-819c-1f294cf9e741",
"nodes" => Fixtures.common_nodes,
"controller_nodes" => Fixtures.common_nodes.select { |node| node['role'] == 'controller'}
},
"method" => "deploy",
"respond_to" => "deploy_resp"
}
end
end

View File

@ -0,0 +1,176 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
module Fixtures
# Three provisioning-stage nodes (one controller, two computes), each with
# the full per-network interface data (management/public/floating/fixed/
# storage VLANs on eth0) that Astute expects from Nailgun.
def self.common_nodes
[
{
"mac" => "52:54:00:0E:B8:F5",
"status" => "provisioning",
"uid" => "devnailgun.mirantis.com",
"error_type" => nil,
"fqdn" => "devnailgun.mirantis.com",
"network_data" => [
{
"gateway" => "192.168.0.1",
"name" => "management",
"dev" => "eth0",
"brd" => "192.168.0.255",
"netmask" => "255.255.255.0",
"vlan" => 102,
"ip" => "192.168.0.2/24"
}, {
"gateway" => "240.0.1.1",
"name" => "public",
"dev" => "eth0",
"brd" => "240.0.1.255",
"netmask" => "255.255.255.0",
"vlan" => 101,
"ip" => "240.0.1.2/24"
}, {
"name" => "floating",
"dev" => "eth0",
"vlan" => 120
}, {
"name" => "fixed",
"dev" => "eth0",
"vlan" => 103
}, {
"name" => "storage",
"dev" => "eth0",
"vlan" => 104,
"ip" => "172.16.1.2/24",
"netmask" => "255.255.255.0",
"brd" => "172.16.1.255"
}
],
"id" => 1,
"ip" => "10.20.0.200",
"role" => "controller",
'meta' => meta
}, {
"mac" => "52:54:00:50:91:DD",
"status" => "provisioning",
"uid" => 2,
"error_type" => nil,
"fqdn" => "slave-2.mirantis.com",
"network_data" => [
{
"gateway" => "192.168.0.1",
"name" => "management",
"dev" => "eth0",
"brd" => "192.168.0.255",
"netmask" => "255.255.255.0",
"vlan" => 102,
"ip" => "192.168.0.3/24"
},
{
"gateway" => "240.0.1.1",
"name" => "public",
"dev" => "eth0",
"brd" => "240.0.1.255",
"netmask" => "255.255.255.0",
"vlan" => 101,
"ip" => "240.0.1.3/24"
},
{
"name" => "floating",
"dev" => "eth0",
"vlan" => 120
},
{
"name" => "fixed",
"dev" => "eth0",
"vlan" => 103
},
{
"name" => "storage",
"dev" => "eth0",
"vlan" => 104,
"ip" => "172.16.1.3/24",
"netmask" => "255.255.255.0",
"brd" => "172.16.1.255"
}
],
"id" => 2,
"ip" => "10.20.0.221",
"role" => "compute",
'meta' => meta
}, {
"mac" => "52:54:00:C3:2C:28",
"status" => "provisioning",
"uid" => 3,
"error_type" => nil,
"fqdn" => "slave-3.mirantis.com",
"network_data" => [
{
"gateway" => "192.168.0.1",
"name" => "management",
"dev" => "eth0",
"brd" => "192.168.0.255",
"netmask" => "255.255.255.0",
"vlan" => 102,
"ip" => "192.168.0.4/24"
},
{
"gateway" => "240.0.1.1",
"name" => "public",
"dev" => "eth0",
"brd" => "240.0.1.255",
"netmask" => "255.255.255.0",
"vlan" => 101,
"ip" => "240.0.1.4/24"
},
{
"name" => "floating",
"dev" => "eth0",
"vlan" => 120
},
{
"name" => "fixed",
"dev" => "eth0",
"vlan" => 103
},
{
"name" => "storage",
"dev" => "eth0",
"vlan" => 104,
"ip" => "172.16.1.4/24",
"netmask" => "255.255.255.0",
"brd" => "172.16.1.255"
}
],
"id" => 3,
"ip" => "10.20.0.68",
"role" => "compute",
'meta' => meta
}
]
end
# Interface metadata shared by every fixture node (two NICs, eth1/eth0).
def self.meta
{
'interfaces' => [
{
'name' => 'eth1',
}, {
'name' => 'eth0',
}
]
}
end
end

View File

@ -0,0 +1,29 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
module Fixtures
  # Attributes fixture for HA deployments: the common attributes extended
  # with the extra HA controllers, the "ha" deployment mode and both virtual
  # IPs (VIPs are required in HA mode and normally come from Nailgun).
  def self.ha_attrs
    common_attrs.tap do |attrs|
      attrs['args']['nodes'] = common_nodes + ha_nodes
      attrs['args']['attributes'].merge!(
        'deployment_mode' => "ha",
        'management_vip' => "192.168.0.111",
        'public_vip' => "240.0.1.111"
      )
    end
  end
end

View File

@ -0,0 +1,121 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
module Fixtures
# Common nodes plus two additional provisioned controllers (uids "4" and
# "5") — the node set used by HA-mode specs.
def self.ha_nodes
common_nodes + [
{
"mac" => "52:54:00:0E:88:88",
"status" => "provisioned",
"uid" => "4",
"error_type" => nil,
"fqdn" => "controller-4.mirantis.com",
"network_data" => [
{
"gateway" => "192.168.0.1",
"name" => "management",
"dev" => "eth0",
"brd" => "192.168.0.255",
"netmask" => "255.255.255.0",
"vlan" => 102,
"ip" => "192.168.0.5/24"
},
{
"gateway" => "240.0.1.1",
"name" => "public",
"dev" => "eth0",
"brd" => "240.0.1.255",
"netmask" => "255.255.255.0",
"vlan" => 101,
"ip" => "240.0.1.5/24"
},
{
"name" => "floating",
"dev" => "eth0",
"vlan" => 120
},
{
"name" => "fixed",
"dev" => "eth0",
"vlan" => 103
},
{
"name" => "storage",
"dev" => "eth0",
"vlan" => 104,
"ip" => "172.16.1.5/24",
"netmask" => "255.255.255.0",
"brd" => "172.16.1.255"
}
],
"id" => 4,
"ip" => "10.20.0.205",
"role" => "controller",
'meta' => meta
},
{
"mac" => "52:54:00:0E:99:99",
"status" => "provisioned",
"uid" => "5",
"error_type" => nil,
"fqdn" => "controller-5.mirantis.com",
"network_data" => [
{
"gateway" => "192.168.0.1",
"name" => "management",
"dev" => "eth0",
"brd" => "192.168.0.255",
"netmask" => "255.255.255.0",
"vlan" => 102,
"ip" => "192.168.0.6/24"
},
{
"gateway" => "240.0.1.1",
"name" => "public",
"dev" => "eth0",
"brd" => "240.0.1.255",
"netmask" => "255.255.255.0",
"vlan" => 101,
"ip" => "240.0.1.6/24"
},
{
"name" => "floating",
"dev" => "eth0",
"vlan" => 120
},
{
"name" => "fixed",
"dev" => "eth0",
"vlan" => 103
},
{
"name" => "storage",
"dev" => "eth0",
"vlan" => 104,
"ip" => "172.16.1.6/24",
"netmask" => "255.255.255.0",
"brd" => "172.16.1.255"
}
],
"id" => 5,
"ip" => "10.20.0.206",
"role" => "controller",
'meta' => meta
}
]
end
end

View File

@ -0,0 +1,81 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
module Fixtures
# Attributes fixture for the multi-role case: a single node carrying both
# the "controller" and "compute" roles (role is an array, not a string).
def self.multiroles_attrs
attrs = common_attrs
attrs['args']['nodes'] = [
{
"mac" => "52:54:00:0E:88:88",
"status" => "provisioned",
"uid" => "4",
"error_type" => nil,
"fqdn" => "controller_compute-4.mirantis.com",
"network_data" => [
{
"gateway" => "192.168.0.1",
"name" => "management",
"dev" => "eth0",
"brd" => "192.168.0.255",
"netmask" => "255.255.255.0",
"vlan" => 102,
"ip" => "192.168.0.5/24"
},
{
"gateway" => "240.0.1.1",
"name" => "public",
"dev" => "eth0",
"brd" => "240.0.1.255",
"netmask" => "255.255.255.0",
"vlan" => 101,
"ip" => "240.0.1.5/24"
},
{
"name" => "floating",
"dev" => "eth0",
"vlan" => 120
},
{
"name" => "fixed",
"dev" => "eth0",
"vlan" => 103
},
{
"name" => "storage",
"dev" => "eth0",
"vlan" => 104,
"ip" => "172.16.1.5/24",
"netmask" => "255.255.255.0",
"brd" => "172.16.1.255"
}
],
"id" => 4,
"ip" => "10.20.0.205",
"role" =>
[
"controller",
"compute"
],
'meta' => meta
}
]
# controller_nodes gets deep copies (deep_copy helper) with the role
# flattened back to the plain string 'controller'.
controller_nodes = attrs['args']['nodes'].select{|n| n['role'].include?('controller')}.map { |e| deep_copy e }
controller_nodes.each {|n| n['role'] = 'controller' }
attrs['args']['controller_nodes'] = controller_nodes
attrs
end
end

View File

@ -273,6 +273,199 @@ describe LogParser do
calculated_nodes = deployment_parser_wrapper('multinode', nodes)
calculated_nodes.each {|node| node['statistics']['pcc'].should > 0.94}
end
end
context "Dirsize-based progress calculation" do
# Builds a temporary directory whose files total exactly +size+ bytes,
# written as files of opts[:chunksize] bytes (default 10000) plus one
# remainder file. Created Tempfile objects are collected in opts[:objects]
# to keep them referenced (so GC finalizers don't unlink them mid-test).
#
# Returns {:path => dir, :objects => [dir, tempfiles...]}.
# Raises RuntimeError on a negative size or invalid options.
#
# FIX: validate chunksize with is_a?(Integer) instead of
# instance_of?(Fixnum) — the Fixnum check wrongly rejected Bignum values,
# and Fixnum itself was removed in Ruby 3.2.
def create_dir_with_size(size, given_opts={})
  raise "The required size should be a non-negative number" if size < 0
  default_opts = {
    :chunksize => 10000,
    :tmpdir => Dir::tmpdir,
    :objects => [],
  }
  opts = default_opts.merge(given_opts)
  if !opts[:chunksize].is_a?(Integer) || opts[:chunksize] <= 0
    raise "The 'chunksize' option should be a positive number"
  end
  raise "The 'tmpdir' option should be a path to a existent directory" if !opts[:tmpdir].instance_of?(String)
  raise "The 'objects' option should be an array" if !opts[:objects].instance_of?(Array)
  dir = Dir::mktmpdir(nil, opts[:tmpdir])
  opts[:objects] << dir
  chunk = 'A' * opts[:chunksize]
  # Full-size chunk files first...
  while size >= opts[:chunksize]
    file = Tempfile::new('prefix', dir)
    file.write(chunk)
    file.close
    opts[:objects] << file
    size -= opts[:chunksize]
  end
  # ...then one remainder file for the leftover bytes, if any.
  if size > 0
    file = Tempfile::new('prefix', dir)
    file.write('A' * size)
    file.close
    opts[:objects] << file
  end
  return {:path => dir, :objects => opts[:objects]}
end
# Progress is the directory size as a percentage of :max_size:
# 10^6 bytes in a dir with max_size = 10^6 * 100/75 -> 75%.
it "should correctly calculate size of directory" do
size = 10**6
dir_info = create_dir_with_size(size)
dir = dir_info[:path]
nodes = [
{:uid => '1',
:path_items => [
{:max_size => size*100/75,
:path => dir}
]
}
]
correct_progress = [
{'uid' => '1',
'progress' => 75}
]
dirsize_parser = Astute::LogParser::DirSizeCalculation.new(nodes)
dirsize_parser.progress_calculate(['1'], nil).should eql(correct_progress)
FileUtils::remove_entry_secure dir
end
# A nested dir of the same size doubles the total: 2*10^6 of max 4*10^6 -> 50%.
it "should correctly calculate size of nested directories" do
size = 10**6
dir_info = create_dir_with_size(size)
dir = dir_info[:path]
dir_info = create_dir_with_size(size, {:tmpdir => dir, :objects => dir_info[:objects]})
nodes = [
{:uid => '1',
:path_items => [
{:max_size => size*4,
:path => dir}
]
}
]
correct_progress = [
{'uid' => '1',
'progress' => 50}
]
dirsize_parser = Astute::LogParser::DirSizeCalculation.new(nodes)
dirsize_parser.progress_calculate(['1'], nil).should eql(correct_progress)
FileUtils::remove_entry_secure dir
end
# A missing path must not raise — it simply contributes zero progress.
it "should return zero if there is no directory" do
nodes = [
{:uid => '1',
:path_items => [
{:max_size => 10000,
:path => '/the-dir-that-should-not-exist'}
]
}
]
correct_progress = [
{'uid' => '1',
'progress' => 0}
]
dirsize_parser = Astute::LogParser::DirSizeCalculation.new(nodes)
dirsize_parser.progress_calculate(['1'], nil).should eql(correct_progress)
end
# No path items at all also yields zero progress.
it "should return zero if no items is propagated" do
nodes = [
{:uid => '1',
:path_items => []
}
]
correct_progress = [
{'uid' => '1',
'progress' => 0}
]
dirsize_parser = Astute::LogParser::DirSizeCalculation.new(nodes)
dirsize_parser.progress_calculate(['1'], nil).should eql(correct_progress)
end
end
# Weight normalization performed by DirSizeCalculation.new: item weights are
# rescaled to sum to 1, unweighted items share the remainder equally,
# zero-weight items are dropped and negative weights are rejected.
context "Dirsize-based weight reassignment" do
# Four unweighted items split the unit weight evenly: 0.25 each.
it "should correctly assign weights to unweighted items" do
nodes = [
{:uid => '1',
:path_items => [{}, {}, {}, {}]
}
]
dirsize_parser = Astute::LogParser::DirSizeCalculation.new(nodes)
dirsize_parser.nodes.first[:path_items].each{|n| n[:weight].should eql(0.25)}
end
# Explicit weights 10 and 30 normalize to 0.25 and 0.75.
it "should correctly recalculate weights of weighted items" do
nodes = [
{:uid => '1',
:path_items => [
{:weight => 10},
{:weight => 30},
]
}
]
dirsize_parser = Astute::LogParser::DirSizeCalculation.new(nodes)
items = dirsize_parser.nodes.first[:path_items]
items[0][:weight].should eql(0.25)
items[1][:weight].should eql(0.75)
end
# Mixed case: explicit weights take half the unit (scaled), the two
# unweighted items share the other half (0.25 each).
it "should correctly recalculate weights of mixed items" do
nodes = [
{:uid => '1',
:path_items => [
{:weight => 10},
{:weight => 30},
{}, {}
]
}
]
dirsize_parser = Astute::LogParser::DirSizeCalculation.new(nodes)
items = dirsize_parser.nodes.first[:path_items]
items[0][:weight].should eql(0.125)
items[1][:weight].should eql(0.375)
items[2][:weight].should eql(0.25)
items[3][:weight].should eql(0.25)
end
it "should raise exception if a negative weight propagated" do
nodes = [
{:uid => '1',
:path_items => [
{:weight => -10},
]
}
]
expect{Astute::LogParser::DirSizeCalculation.new(nodes)}.to \
raise_error("Weight should be a non-negative number")
end
it "should drop items with zero weight" do
nodes = [
{:uid => '1',
:path_items => [
{:weight => 0},
{:weight => 0},
]
}
]
dirsize_parser = Astute::LogParser::DirSizeCalculation.new(nodes)
dirsize_parser.nodes.first[:path_items].length.should eql(0)
end
# The constructor must work on a copy, not mutate the caller's node list.
it "should not change initialization attribute" do
nodes = [
{:uid => '1',
:path_items => [
{:weight => 0},
{:weight => 5},
{}
]
}
]
dirsize_parser = Astute::LogParser::DirSizeCalculation.new(nodes)
dirsize_parser.nodes.should_not eql(nodes)
end
end
end

View File

@ -25,141 +25,10 @@ describe "NailyFact DeploymentEngine" do
@ctx.stubs(:reporter).returns(reporter)
reporter.stubs(:report)
@deploy_engine = Astute::DeploymentEngine::NailyFact.new(@ctx)
meta = {
'interfaces' => [
{
'name' => 'eth1',
}, {
'name' => 'eth0',
}
]
}
@data = {"args" =>
{"attributes" =>
{"storage_network_range" => "172.16.0.0/24", "auto_assign_floating_ip" => false,
"mysql" => {"root_password" => "Z2EqsZo5"},
"keystone" => {"admin_token" => "5qKy0i63", "db_password" => "HHQ86Rym", "admin_tenant" => "admin"},
"nova" => {"user_password" => "h8RY8SE7", "db_password" => "Xl9I51Cb"},
"glance" => {"user_password" => "nDlUxuJq", "db_password" => "V050pQAn"},
"rabbit" => {"user" => "nova", "password" => "FLF3txKC"},
"management_network_range" => "192.168.0.0/24",
"public_network_range" => "240.0.1.0/24",
"fixed_network_range" => "10.0.0.0/24",
"floating_network_range" => "240.0.0.0/24"},
"task_uuid" => "19d99029-350a-4c9c-819c-1f294cf9e741",
"nodes" => [{"mac" => "52:54:00:0E:B8:F5", "status" => "provisioning",
"uid" => "devnailgun.mirantis.com", "error_type" => nil,
"fqdn" => "devnailgun.mirantis.com",
"network_data" => [{"gateway" => "192.168.0.1",
"name" => "management", "dev" => "eth0",
"brd" => "192.168.0.255", "netmask" => "255.255.255.0",
"vlan" => 102, "ip" => "192.168.0.2/24"},
{"gateway" => "240.0.1.1",
"name" => "public", "dev" => "eth0",
"brd" => "240.0.1.255", "netmask" => "255.255.255.0",
"vlan" => 101, "ip" => "240.0.1.2/24"},
{"name" => "floating", "dev" => "eth0", "vlan" => 120},
{"name" => "fixed", "dev" => "eth0", "vlan" => 103},
{"name" => "storage", "dev" => "eth0", "vlan" => 104,
"ip" => "172.16.1.2/24", "netmask" => "255.255.255.0",
"brd" => "172.16.1.255"}],
"id" => 1,
"ip" => "10.20.0.200",
"role" => "controller",
'meta' => meta},
{"mac" => "52:54:00:50:91:DD", "status" => "provisioning",
"uid" => 2, "error_type" => nil,
"fqdn" => "slave-2.mirantis.com",
"network_data" => [{"gateway" => "192.168.0.1",
"name" => "management", "dev" => "eth0",
"brd" => "192.168.0.255", "netmask" => "255.255.255.0",
"vlan" => 102, "ip" => "192.168.0.3/24"},
{"gateway" => "240.0.1.1",
"name" => "public", "dev" => "eth0",
"brd" => "240.0.1.255", "netmask" => "255.255.255.0",
"vlan" => 101, "ip" => "240.0.1.3/24"},
{"name" => "floating", "dev" => "eth0", "vlan" => 120},
{"name" => "fixed", "dev" => "eth0", "vlan" => 103},
{"name" => "storage", "dev" => "eth0", "vlan" => 104,
"ip" => "172.16.1.3/24", "netmask" => "255.255.255.0",
"brd" => "172.16.1.255"}],
"id" => 2,
"ip" => "10.20.0.221",
"role" => "compute",
'meta' => meta},
{"mac" => "52:54:00:C3:2C:28", "status" => "provisioning",
"uid" => 3, "error_type" => nil,
"fqdn" => "slave-3.mirantis.com",
"network_data" => [{"gateway" => "192.168.0.1",
"name" => "management", "dev" => "eth0",
"brd" => "192.168.0.255", "netmask" => "255.255.255.0",
"vlan" => 102, "ip" => "192.168.0.4/24"},
{"gateway" => "240.0.1.1",
"name" => "public", "dev" => "eth0",
"brd" => "240.0.1.255", "netmask" => "255.255.255.0",
"vlan" => 101, "ip" => "240.0.1.4/24"},
{"name" => "floating", "dev" => "eth0", "vlan" => 120},
{"name" => "fixed", "dev" => "eth0", "vlan" => 103},
{"name" => "storage", "dev" => "eth0", "vlan" => 104,
"ip" => "172.16.1.4/24", "netmask" => "255.255.255.0",
"brd" => "172.16.1.255"}],
"id" => 3,
"ip" => "10.20.0.68",
"role" => "compute",
'meta' => meta}]},
"method" => "deploy",
"respond_to" => "deploy_resp"}
@data['args']['attributes']['controller_nodes'] = @data['args']['nodes'].
select { |node| node['role'] == 'controller'}
ha_nodes = @data['args']['nodes'] +
[{"mac" => "52:54:00:0E:88:88", "status" => "provisioned",
"uid" => "4", "error_type" => nil,
"fqdn" => "controller-4.mirantis.com",
"network_data" => [{"gateway" => "192.168.0.1",
"name" => "management", "dev" => "eth0",
"brd" => "192.168.0.255", "netmask" => "255.255.255.0",
"vlan" => 102, "ip" => "192.168.0.5/24"},
{"gateway" => "240.0.1.1",
"name" => "public", "dev" => "eth0",
"brd" => "240.0.1.255", "netmask" => "255.255.255.0",
"vlan" => 101, "ip" => "240.0.1.5/24"},
{"name" => "floating", "dev" => "eth0", "vlan" => 120},
{"name" => "fixed", "dev" => "eth0", "vlan" => 103},
{"name" => "storage", "dev" => "eth0", "vlan" => 104,
"ip" => "172.16.1.5/24", "netmask" => "255.255.255.0",
"brd" => "172.16.1.255"}],
"id" => 4,
"ip" => "10.20.0.205",
"role" => "controller",
'meta' => meta},
{"mac" => "52:54:00:0E:99:99", "status" => "provisioned",
"uid" => "5", "error_type" => nil,
"fqdn" => "controller-5.mirantis.com",
"network_data" => [{"gateway" => "192.168.0.1",
"name" => "management", "dev" => "eth0",
"brd" => "192.168.0.255", "netmask" => "255.255.255.0",
"vlan" => 102, "ip" => "192.168.0.6/24"},
{"gateway" => "240.0.1.1",
"name" => "public", "dev" => "eth0",
"brd" => "240.0.1.255", "netmask" => "255.255.255.0",
"vlan" => 101, "ip" => "240.0.1.6/24"},
{"name" => "floating", "dev" => "eth0", "vlan" => 120},
{"name" => "fixed", "dev" => "eth0", "vlan" => 103},
{"name" => "storage", "dev" => "eth0", "vlan" => 104,
"ip" => "172.16.1.6/24", "netmask" => "255.255.255.0",
"brd" => "172.16.1.255"}],
"id" => 5,
"ip" => "10.20.0.206",
"role" => "controller",
'meta' => meta}]
@data_ha = Marshal.load(Marshal.dump(@data))
@data_ha['args']['nodes'] = ha_nodes
@data_ha['args']['attributes']['deployment_mode'] = "ha"
# VIPs are required for HA mode and should be passed from Nailgun (only in HA)
@data_ha['args']['attributes']['management_vip'] = "192.168.0.111"
@data_ha['args']['attributes']['public_vip'] = "240.0.1.111"
@data = Fixtures.common_attrs
@data_ha = Fixtures.ha_attrs
@data_mr = Fixtures.multiroles_attrs
end
it "it should call valid method depends on attrs" do
@ -190,13 +59,23 @@ describe "NailyFact DeploymentEngine" do
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, compute_nodes, instance_of(Fixnum), true).once
@deploy_engine.deploy(@data['args']['nodes'], @data['args']['attributes'])
end
it "multiroles for node should be support" do
@data_mr['args']['attributes']['deployment_mode'] = "multinode"
node_amount = @data_mr['args']['nodes'][0]['role'].size
# One publish_facts/deploy call is expected per role the node carries
# (the fixture node has two roles: controller and compute).
Astute::Metadata.expects(:publish_facts).times(node_amount)
Astute::PuppetdDeployer.expects(:deploy).times(node_amount)
@deploy_engine.deploy(@data_mr['args']['nodes'], @data_mr['args']['attributes'])
end
it "ha deploy should not raise any exception" do
Astute::Metadata.expects(:publish_facts).at_least_once
controller_nodes = @data_ha['args']['nodes'].select{|n| n['role'] == 'controller'}
primary_nodes = [controller_nodes.shift]
compute_nodes = @data_ha['args']['nodes'].select{|n| n['role'] == 'compute'}
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, primary_nodes, 0, false).once
controller_nodes.each do |n|
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, [n], 2, true).once
end
@ -212,14 +91,6 @@ describe "NailyFact DeploymentEngine" do
@deploy_engine.deploy([ctrl], @data_ha['args']['attributes'])
end
# Singlenode mode: exactly one node, one facts publish and one deploy call.
it "singlenode deploy should not raise any exception" do
@data['args']['attributes']['deployment_mode'] = "singlenode"
@data['args']['nodes'] = [@data['args']['nodes'][0]] # We have only one node in singlenode
Astute::Metadata.expects(:publish_facts).times(@data['args']['nodes'].size)
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, @data['args']['nodes'], instance_of(Fixnum), true).once
@deploy_engine.deploy(@data['args']['nodes'], @data['args']['attributes'])
end
describe 'Vlan manager' do
it 'Should set fixed_interface value' do
node = {
@ -245,12 +116,14 @@ describe "NailyFact DeploymentEngine" do
}
}
attrs = {
'network_manager' => 'VlanManager'
'novanetwork_parameters' => {
'network_manager' => 'VlanManager'
}
}
expect = {
"role" => "controller",
"uid"=>1,
"uid" => 1,
"network_data" => {"eth0.102" =>
{
@ -272,14 +145,11 @@ describe "NailyFact DeploymentEngine" do
}.to_json,
"fixed_interface" => "eth2",
"network_manager" => "VlanManager",
"management_interface" => "eth0.102",
"internal_address" => "192.168.0.2",
'management_address' => '192.168.0.2'
"novanetwork_parameters" => '{"network_manager":"VlanManager"}',
"management_interface" => "eth0.102"
}
Astute::Metadata.expects(:publish_facts).with(@ctx, node['uid'], expect)
@deploy_engine.create_facts(node, attrs)
@deploy_engine.create_facts(node, attrs).should == expect
end
end
end

View File

@ -217,15 +217,14 @@ describe Astute::Orchestrator do
end
it "remove_nodes do not fail if any of nodes failed"
before(:all) do
@data = {
"engine"=>{
"url"=>"http://localhost/cobbler_api",
"username"=>"cobbler",
"url"=>"http://localhost/cobbler_api",
"username"=>"cobbler",
"password"=>"cobbler"
},
},
"task_uuid"=>"a5c44b9a-285a-4a0c-ae65-2ed6b3d250f4",
"nodes" => [
{
@ -270,19 +269,19 @@ describe Astute::Orchestrator do
]
}.freeze
end
describe '#fast_provision' do
context 'cobler cases' do
# With empty engine settings fast_provision must abort with StopIteration.
it "raise error if cobler settings empty" do
expect {@orchestrator.fast_provision(@reporter, {}, @data['nodes'])}.
to raise_error(StopIteration)
end
end
context 'node state cases' do
before(:each) do
remote = mock() do
stubs(:call)
stubs(:call).with('login', 'cobbler', 'cobbler').returns('remotetoken')
@ -292,12 +291,12 @@ describe Astute::Orchestrator do
stubs(:new).returns(remote)
end
end
# An empty node list is a caller error and must be reported explicitly.
it "raises error if nodes list is empty" do
expect {@orchestrator.fast_provision(@reporter, @data['engine'], {})}.
to raise_error(/Nodes to provision are not provided!/)
end
it "try to reboot nodes from list" do
Astute::Provision::Cobbler.any_instance do
expects(:power_reboot).with('controller-1')
@ -305,39 +304,39 @@ describe Astute::Orchestrator do
@orchestrator.stubs(:check_reboot_nodes).returns([])
@orchestrator.fast_provision(@reporter, @data['engine'], @data['nodes'])
end
before(:each) { Astute::Provision::Cobbler.any_instance.stubs(:power_reboot).returns(333) }
context 'node reboot success' do
before(:each) { Astute::Provision::Cobbler.any_instance.stubs(:event_status).
returns([Time.now.to_f, 'controller-1', 'complete'])}
# Cobbler reports the reboot event as 'complete' — no failed nodes expected.
it "does not find failed nodes" do
Astute::Provision::Cobbler.any_instance.stubs(:event_status).
returns([Time.now.to_f, 'controller-1', 'complete'])
@orchestrator.fast_provision(@reporter, @data['engine'], @data['nodes'])
end
it "report about success" do
@reporter.expects(:report).with({'status' => 'ready', 'progress' => 100}).returns(true)
@orchestrator.fast_provision(@reporter, @data['engine'], @data['nodes'])
end
# The engine state must be synced back to Cobbler after a successful run.
it "sync engine state" do
Astute::Provision::Cobbler.any_instance do
expects(:sync).once
end
@orchestrator.fast_provision(@reporter, @data['engine'], @data['nodes'])
end
end
context 'node reboot fail' do
before(:each) { Astute::Provision::Cobbler.any_instance.stubs(:event_status).
returns([Time.now.to_f, 'controller-1', 'failed'])}
it "should sync engine state" do
Astute::Provision::Cobbler.any_instance do
expects(:sync).once
@ -347,77 +346,128 @@ describe Astute::Orchestrator do
rescue
end
end
it "raise error if failed node find" do
expect {@orchestrator.fast_provision(@reporter, @data['engine'], @data['nodes'])}.to raise_error(StopIteration)
end
end
end
end
describe '#provision' do
before(:each) do
# Disable sleeping in test env (doubles the test speed)
def @orchestrator.sleep_not_greater_than(time, &block)
block.call
end
end
it "raises error if nodes list is empty" do
expect {@orchestrator.provision(@reporter, @data['task_uuid'], {})}.
to raise_error(/Nodes to provision are not provided!/)
end
# The provision log parser must be primed with the node list before polling.
it "prepare provision log for parsing" do
Astute::LogParser::ParseProvisionLogs.any_instance do
expects(:prepare).with(@data['nodes']).once
end
@orchestrator.stubs(:report_about_progress).returns()
@orchestrator.stubs(:node_type).returns([{'uid' => '1', 'node_type' => 'target' }])
@orchestrator.provision(@reporter, @data['task_uuid'], @data['nodes'])
end
# Log-parser failures are best-effort: provisioning must continue anyway.
it "ignore problem with parsing provision log" do
Astute::LogParser::ParseProvisionLogs.any_instance do
stubs(:prepare).with(@data['nodes']).raises
end
@orchestrator.stubs(:report_about_progress).returns()
@orchestrator.stubs(:node_type).returns([{'uid' => '1', 'node_type' => 'target' }])
@orchestrator.provision(@reporter, @data['task_uuid'], @data['nodes'])
end
it 'provision nodes using mclient' do
@orchestrator.stubs(:report_about_progress).returns()
@orchestrator.expects(:node_type).returns([{'uid' => '1', 'node_type' => 'target' }])
@orchestrator.provision(@reporter, @data['task_uuid'], @data['nodes'])
end
it "fail if timeout of provisioning is exceeded" do
Astute::LogParser::ParseProvisionLogs.any_instance do
stubs(:prepare).returns()
end
Timeout.stubs(:timeout).raises(Timeout::Error)
msg = 'Timeout of provisioning is exceeded.'
msg = 'Timeout of provisioning is exceeded.'
error_mgs = {'status' => 'error', 'error' => msg, 'nodes' => [{ 'uid' => '1',
'status' => 'error',
'error_msg' => msg,
'progress' => 100,
'error_type' => 'provision'}]}
@reporter.expects(:report).with(error_mgs).once
@orchestrator.provision(@reporter, @data['task_uuid'], @data['nodes'])
end
end
end
end
describe 'Red-hat checking' do
  # Minimal credentials hash accepted by the Red Hat check entry points.
  let(:credentials) do
    redhat = {
      'username' => 'user',
      'password' => 'password'
    }
    { 'release_name' => 'RELEASE_NAME', 'redhat' => redhat }
  end

  # Wrap raw MCollective payload into the single-element result array
  # shape produced by the RPC client.
  def mc_result(data)
    [mock_mc_result({:data => data})]
  end

  # Stub every RPC execute call with a zero exit code and the given stdout.
  def stub_rpc(stdout='')
    result = mc_result(:exit_code => 0, :stdout => stdout, :stderr => '')
    mock_rpcclient.stubs(:execute).returns(result)
  end

  describe '#check_redhat_credentials' do
    it 'Should raise StopIteration in case of errors ' do
      stub_rpc("Before\nInvalid username or password\nAfter")
      expect {
        @orchestrator.check_redhat_credentials(@reporter, @data['task_uuid'], credentials)
      }.to raise_error(StopIteration)
    end

    it 'Should not raise errors ' do
      stub_rpc
      @orchestrator.check_redhat_credentials(@reporter, @data['task_uuid'], credentials)
    end
  end

  describe '#check_redhat_licenses' do
    it 'Should raise StopIteration in case of errors ' do
      # Zero available licenses must abort the deployment flow.
      stub_rpc('{"openstack_licenses_physical_hosts_count":0}')
      expect {
        @orchestrator.check_redhat_licenses(@reporter, @data['task_uuid'], credentials)
      }.to raise_error(StopIteration)
    end

    it 'Should not raise errors ' do
      stub_rpc('{"openstack_licenses_physical_hosts_count":1}')
      @orchestrator.check_redhat_licenses(@reporter, @data['task_uuid'], credentials)
    end
  end
end
end

View File

@ -25,7 +25,7 @@ describe "Puppetd" do
@ctx = mock
@ctx.stubs(:task_id)
@reporter = mock('reporter')
@ctx.stubs(:reporter).returns(ProxyReporter.new(@reporter))
@ctx.stubs(:reporter).returns(ProxyReporter::DeploymentProxyReporter.new(@reporter))
@ctx.stubs(:deploy_log_parser).returns(Astute::LogParser::NoParsing.new)
end

View File

@ -0,0 +1,213 @@
# -*- encoding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
require File.join(File.dirname(__FILE__), '../spec_helper')
# Specs for Astute::RedhatChecker: validation of Red Hat credentials and
# license counts via MCollective RPC, with errors funnelled to the reporter.
describe Astute::RedhatChecker do
  include SpecHelpers

  # Credentials payload for the RHSM (subscription manager) license type.
  let(:redhat_credentials) do
    {
      'release_name' => 'RELEASE_NAME',
      'redhat' => {
        'license_type' => 'rhsm',
        'username' => 'user',
        'password' => 'password'
      }
    }
  end

  let(:reporter) { mock('reporter') }
  let(:ctx) { Astute::Context.new('task-uuuid', reporter) }
  let(:redhat_checker) { described_class.new(ctx, redhat_credentials) }
  # let! so the RPC client mock is created before each example runs,
  # even when the example never references it directly.
  let!(:rpcclient) { mock_rpcclient }

  # Payload reported on any successful check.
  let(:success_result) { {'status' => 'ready', 'progress' => 100} }

  let(:invalid_user_password_msg) do
    'Invalid username or password. ' + \
    'To create a login, please visit https://www.redhat.com/wapps/ugc/register.html'
  end

  # Wrap raw RPC payload into the single-element result array shape.
  def mc_result(result)
    [mock_mc_result({:data => result})]
  end

  # Expect exactly one RPC execute call, returning the given payload.
  def execute_returns(data)
    rpcclient.expects(:execute).once.returns(mc_result(data))
  end

  def should_report_once(data)
    reporter.expects(:report).once.with(data)
  end

  # Expect a single error report; `data` overrides/extends the base error hash.
  def should_report_error(data)
    error_data = {'status' => 'error', 'progress' => 100}.merge(data)
    reporter.expects(:report).once.with(error_data)
  end

  # Shared error-handling behaviour; hosts must define `execute_handler`
  # to invoke the checker method under test.
  shared_examples 'redhat checker' do
    it 'should handle network connection errors' do
      execute_returns({
        :exit_code => 0,
        :stdout => "Text before\nNetwork error, unable to connect to server.\nText after"})

      err_msg = 'Unable to reach host cdn.redhat.com. ' + \
        'Please check your Internet connection.'
      should_report_error({'error' => err_msg})

      expect { execute_handler }.to raise_error(Astute::RedhatCheckingError)
    end

    it 'should handle wrong username/password errors' do
      # Detected by pattern match on stdout even with exit code 0.
      execute_returns({
        :exit_code => 0,
        :stdout => "Text before\nInvalid username or password\nText after"})

      err_msg = invalid_user_password_msg
      should_report_error({'error' => err_msg})

      expect { execute_handler }.to raise_error(Astute::RedhatCheckingError)
    end

    it 'should handle uniq errors' do
      # Unrecognized failures are reported verbatim with stdout/stderr attached.
      execute_returns({
        :exit_code => 1,
        :stdout => "Uniq error stdout",
        :stderr => "Uniq error stderr"})

      err_msg = "Unknown error Stdout: Uniq error stdout Stderr: Uniq error stderr"
      should_report_error({'error' => err_msg})

      expect { execute_handler }.to raise_error(Astute::RedhatCheckingError)
    end
  end

  describe '#check_redhat_credentials' do
    let(:success_msg) { "Account information for RELEASE_NAME has been successfully modified." }

    it_behaves_like 'redhat checker' do
      def execute_handler
        redhat_checker.check_redhat_credentials
      end
    end

    it 'should be success with right credentials' do
      execute_returns({:exit_code => 0})
      should_report_once(success_result.merge({'msg' => success_msg}))

      redhat_checker.check_redhat_credentials
    end

    context 'satellite server is set' do
      # RHN license type with a satellite host triggers a second RPC call.
      let(:redhat_credentials) do
        {
          'release_name' => 'RELEASE_NAME',
          'redhat' => {
            'license_type' => 'rhn',
            'username' => 'user',
            'password' => 'password',
            'satellite' => 'satellite.server.com'
          }
        }
      end

      let(:redhat_checker) { described_class.new(ctx, redhat_credentials) }

      it 'success when all commands execute without an error' do
        # One call for credentials, one for the satellite check.
        execute_returns({:exit_code => 0})
        execute_returns({:exit_code => 0})

        should_report_once(success_result.merge({'msg' => success_msg}))

        redhat_checker.check_redhat_credentials
      end

      it 'fails user\password is wrong' do
        err_msg = "Text before\nInvalid username or password\nText after"
        execute_returns({:exit_code => 1, :stdout => err_msg })
        should_report_error({'error' => invalid_user_password_msg})

        expect { redhat_checker.check_redhat_credentials }.to raise_error(Astute::RedhatCheckingError)
      end

      it 'fails satellite server is wrong' do
        # First call succeeds, second (satellite) call fails to connect.
        err_msg = "text before\ncouldn't connect to host\ntext after"
        rpcclient.expects(:execute).twice.returns(
          mc_result({:exit_code => 0}),
          mc_result({:exit_code => 1, :stdout => err_msg}))

        err_msg = 'Unable to communicate with RHN Satellite Server. ' + \
          'Please check host and try again.'

        should_report_error({'error' => err_msg})

        expect { redhat_checker.check_redhat_credentials }.to raise_error(Astute::RedhatCheckingError)
      end
    end
  end

  describe '#check_redhat_licenses' do
    describe 'nodes parameter is nil' do
      it_behaves_like 'redhat checker' do
        def execute_handler
          redhat_checker.check_redhat_credentials
        end
      end

      it 'should be success if no errors' do
        execute_returns({:exit_code => 0, :stdout => '{"openstack_licenses_physical_hosts_count":1}'})

        should_report_once(success_result)

        redhat_checker.check_redhat_licenses
      end
    end

    describe 'nodes parameter is not nil' do
      it_behaves_like 'redhat checker' do
        def execute_handler
          redhat_checker.check_redhat_licenses([1])
        end
      end

      it 'should report ready if no errors' do
        execute_returns({:exit_code => 0,
          :stdout => '{"openstack_licenses_physical_hosts_count":1}'})

        should_report_once(success_result)

        nodes = [1]
        redhat_checker.check_redhat_licenses(nodes)
      end

      it 'should report message if not enough licenses' do
        # 3 licenses for 4 nodes: still reports 'ready', but with a warning message.
        execute_returns({:exit_code => 0,
          :stdout => '{"openstack_licenses_physical_hosts_count":3}'})

        err_msg = 'Your account has only 3 licenses available to deploy Red ' + \
          'Hat OpenStack. Contact your Red Hat sales representative to ' + \
          'get the proper subscriptions associated with your account. ' + \
          'https://access.redhat.com/site/solutions/368643'
        should_report_once({'progress' => 100, 'status' => 'ready', 'msg' => err_msg})

        nodes = [1, 2, 3, 4]
        redhat_checker.check_redhat_licenses(nodes)
      end
    end
  end
end

View File

@ -25,7 +25,7 @@ describe "ProxyReporter" do
{'status' => 'deploying', 'uid' => '2',
'progress' => 54}]}
@up_reporter = mock('up_reporter')
@reporter = ProxyReporter.new(@up_reporter)
@reporter = ProxyReporter::DeploymentProxyReporter.new(@up_reporter)
end
it "reports first-come data" do

View File

@ -23,7 +23,7 @@ describe "SimplePuppet DeploymentEngine" do
@ctx.stubs(:deploy_log_parser).returns(Astute::LogParser::NoParsing.new)
@reporter = mock('reporter')
@reporter.stub_everything
@ctx.stubs(:reporter).returns(Astute::ProxyReporter.new(@reporter))
@ctx.stubs(:reporter).returns(Astute::ProxyReporter::DeploymentProxyReporter.new(@reporter))
@deploy_engine = Astute::DeploymentEngine::SimplePuppet.new(@ctx)
@env = YAML.load_file(File.join(File.dirname(__FILE__), "..", "..", "examples", "no_attrs.yaml"))
end
@ -60,14 +60,6 @@ describe "SimplePuppet DeploymentEngine" do
@deploy_engine.deploy(@env['nodes'], @env['attributes'])
end
it "singlenode deploy should not raise any exception" do
@env['attributes']['deployment_mode'] = "singlenode"
@env['nodes'] = [@env['nodes'][0]] # We have only one node in singlenode
Astute::Metadata.expects(:publish_facts).never
Astute::PuppetdDeployer.expects(:deploy).once # one call for one node
@deploy_engine.deploy(@env['nodes'], @env['attributes'])
end
it "ha_compact deploy should not raise any exception" do
@env['attributes']['deployment_mode'] = "ha_compact"
@env['nodes'].concat([{'uid'=>'c1', 'role'=>'controller'},
@ -79,7 +71,6 @@ describe "SimplePuppet DeploymentEngine" do
primary_ctrl_nodes = [controller_nodes.shift]
Astute::Metadata.expects(:publish_facts).never
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, primary_ctrl_nodes, 0, false).once
controller_nodes.each do |n|
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, [n], 2, true).once
end
@ -92,30 +83,32 @@ describe "SimplePuppet DeploymentEngine" do
it "ha_full deploy should not raise any exception" do
@env['attributes']['deployment_mode'] = "ha_full"
@env['nodes'].concat([{'uid'=>'c1', 'role'=>'controller'}, {'uid'=>'c2', 'role'=>'controller'},
{'uid'=>'q1', 'role'=>'quantum'}, {'uid'=>'q2', 'role'=>'quantum'},
{'uid'=>'st1', 'role'=>'storage'}, {'uid'=>'st2', 'role'=>'storage'},
{'uid'=>'sw1', 'role'=>'primary-swift-proxy'}, {'uid'=>'sw2', 'role'=>'swift-proxy'},
{'uid'=>'o1', 'role'=>'other'}])
{'uid'=>'st1', 'role'=>'storage'}, {'uid'=>'st2', 'role'=>'storage'},
{'uid'=>'sw1', 'role'=>'primary-swift-proxy'}, {'uid'=>'sw2', 'role'=>'swift-proxy'},
{'uid'=>'o1', 'role'=>'other'}])
controller_nodes = @env['nodes'].select{|n| n['role'] == 'controller'}
primary_ctrl_nodes = [controller_nodes.shift]
compute_nodes = @env['nodes'].select{|n| n['role'] == 'compute'}
quantum_nodes = @env['nodes'].select {|n| n['role'] == 'quantum'}
storage_nodes = @env['nodes'].select {|n| n['role'] == 'storage'}
proxy_nodes = @env['nodes'].select {|n| n['role'] == 'swift-proxy'}
primary_proxy_nodes = @env['nodes'].select {|n| n['role'] == 'primary-swift-proxy'}
primary_nodes = primary_ctrl_nodes + primary_proxy_nodes
other_nodes = @env['nodes'] - controller_nodes - primary_nodes - quantum_nodes
other_nodes = @env['nodes'] - controller_nodes - primary_proxy_nodes - \
primary_ctrl_nodes - proxy_nodes - storage_nodes
Astute::Metadata.expects(:publish_facts).never
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, primary_ctrl_nodes, 0, false).once
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, primary_proxy_nodes, 2, true).once
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, proxy_nodes, 2, true).once
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, storage_nodes, 2, true).once
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, primary_ctrl_nodes, 2, true).once
controller_nodes.each do |n|
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, [n], 2, true).once
end
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, primary_nodes, 2, true).once
# Astute::PuppetdDeployer.expects(:deploy).with(@ctx, primary_ctrl_nodes, 0, false).once
# Astute::PuppetdDeployer.expects(:deploy).with(@ctx, quantum_nodes, 2, true).once
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, other_nodes, 2, true).once
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, quantum_nodes, 2, true).once
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, other_nodes, instance_of(Fixnum), true).once
@deploy_engine.deploy(@env['nodes'], @env['attributes'])
end
end