Add "Writing your First OpenStack Application" Guide

Written during a book sprint in Taipei, from March 30th 2015
to April 3rd 2015.

Co-Authored-By: Sean M. Collins <sean@coreitpro.com>
Co-Authored-By: Tom Fifield <tom@openstack.org>
Co-Authored-By: James Dempsey <jamesd@catalyst.net.nz>
Co-Authored-By: Nick Chase <nchase@mirantis.com>
Co-Authored-By: Christian Berendt <berendt@b1-systems.de>

Implements blueprint openstack-firstapp

Change-Id: I55ae32d0c04f641c818bda4714d9bc691a98e6b1
This commit is contained in:
Sean M. Collins 2015-04-14 15:04:06 -04:00 committed by Tom Fifield
parent 34d73033b1
commit c219133376
29 changed files with 4425 additions and 0 deletions

1
.gitignore vendored
View File

@ -18,3 +18,4 @@ target/
*~
.*.swp
.bak
build/

View File

@ -0,0 +1,44 @@
****************************************
Writing your First OpenStack Application
****************************************
This repo contains the "Writing your First OpenStack Application"
tutorial.
The tutorial works with an application that can be found at:
https://github.com/stackforge/faafo
--------------------------------
/bin
--------------------------------
This document was initially written in 'sprint' style.
/bin contains some useful scripts for the sprint, such as
pads2files which facilitates the creation of files from
an etherpad server using its API.
--------------------------------
/doc
--------------------------------
/doc contains a playground for the actual tutorial documentation
It's RST, built with sphinx.
The RST source includes conditional output logic, so specifying
tox -e libcloud
will invoke sphinx-build with -t libcloud, meaning sections
marked .. only:: libcloud in the RST will be built, while others
won't.
sphinx and openstackdoctheme are needed to build the docs
--------------------------------
/samples
--------------------------------
The code samples provided in the guide are sourced from files
in this directory. There is a sub-directory for each SDK.

View File

@ -0,0 +1,207 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = _build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

# Every target below is a command, not a file: declare them all phony so a
# stray file with the same name (e.g. "clean" or "libcloud") cannot shadow
# the target.  The original list omitted the per-SDK targets and several
# builders (latexpdfja, texinfo, info, xml, pseudoxml).
.PHONY: help clean html libcloud openstacksdk jclouds fog dotnet node \
	dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub \
	latex latexpdf latexpdfja text man texinfo info gettext \
	changes linkcheck doctest xml pseudoxml

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  xml        to make Docutils-native XML files"
	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
	rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

# Per-SDK builds: "-t <tag>" makes ".. only:: <tag>" sections in the RST
# visible to the HTML builder.

libcloud:
	$(SPHINXBUILD) -t libcloud -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

openstacksdk:
	$(SPHINXBUILD) -t openstacksdk -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

jclouds:
	$(SPHINXBUILD) -t jclouds -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

fog:
	$(SPHINXBUILD) -t fog -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dotnet:
	$(SPHINXBUILD) -t dotnet -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

node:
	$(SPHINXBUILD) -t node -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/FirstApp.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/FirstApp.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/FirstApp"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/FirstApp"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	# Use $(MAKE), not a literal "make", so -n/-j and the jobserver
	# propagate to the sub-make.
	$(MAKE) -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

View File

@ -0,0 +1,54 @@
========
Appendix
========
Bootstrapping Your Network
--------------------------
Most cloud providers will provision all of the required network objects necessary to
boot an instance. An easy way to see if these have been created for you is to access
the Network Topology section of the OpenStack dashboard.
.. figure:: images/network-topology.png
:width: 920px
:align: center
:height: 622px
:alt: network topology view
:figclass: align-center
Specify a network during instance build
---------------------------------------
.. todo:: code for creating a networking using code
Requirements of the First App Application For OpenStack
-------------------------------------------------------
To be able to install the First App Application For OpenStack from PyPI you have to install
the following packages:
On openSUSE/SLES:
.. code-block:: shell
sudo zypper install -y python-devel python-pip
On Fedora/CentOS/RHEL:
.. code-block:: shell
sudo yum install -y python-devel python-pip
On Debian/Ubuntu:
.. code-block:: shell
sudo apt-get update
sudo apt-get install -y python-dev python-pip
To simplify this process, you can run the following command, which runs the appropriate commands above for the distribution in use.
.. code-block:: shell
curl -s http://git.openstack.org/cgit/stackforge/faafo/plain/contrib/install.sh | bash

View File

@ -0,0 +1,273 @@
# -*- coding: utf-8 -*-
#
# FirstApp documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 11 12:27:57 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os

import openstackdocstheme

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.  ('sphinx.ext.todo' was previously listed twice; the duplicate
# entry has been removed.)
extensions = [
    'sphinx.ext.todo',
    'sphinx.ext.ifconfig',
    'sphinxcontrib.blockdiag',
    'sphinxcontrib.nwdiag',
    'sphinx.ext.graphviz',
    # 'oslosphinx',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'FirstApp'
copyright = u'2015, OpenStack Docs Team'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = 'openstackdocs'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = [openstackdocstheme.get_html_theme_path()]

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'FirstAppdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'FirstApp.tex', u'FirstApp Documentation',
     u'OpenStack Doc Team', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'firstapp', u'FirstApp Documentation',
     [u'OpenStack Doc Team'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'FirstApp', u'FirstApp Documentation',
     u'OpenStack Doc Team', 'FirstApp', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

# Set to True to enable printing of the TODO sections
todo_include_todos = False

View File

@ -0,0 +1,9 @@
// Service topology of the Fractals sample app: directed edges show which
// component talks to which (API <-> Database, API -> Webinterface,
// API -> Queue Service -> Worker -> API).
// NOTE(review): the edge colors presumably distinguish separate traffic
// flows, but their meaning is not documented here — confirm against the
// guide text before relying on them.
digraph {
API -> Database [color=green];
API -> Database [color=orange];
Database -> API [color=red];
API -> Webinterface [color=red];
API -> "Queue Service" [color=orange];
"Queue Service" -> Worker [color=orange];
Worker -> API [color=green];
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 60 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 35 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 202 KiB

View File

@ -0,0 +1,7 @@
// Work-distribution diagram: the API publishes to a central Queue, which
// is consumed by two workers.  rankdir=LR lays the graph out left-to-right;
// the Queue node is drawn as a double circle to make it stand out.
digraph {
rankdir=LR;
Queue [shape="doublecircle"];
API -> Queue;
Queue -> "Worker 1";
Queue -> "Worker 2";
}

View File

@ -0,0 +1,31 @@
.. FirstApp documentation master file, created by
sphinx-quickstart on Wed Feb 11 12:27:57 2015.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to FirstApp's documentation!
====================================
Contents:
.. toctree::
:maxdepth: 2
section1
section2
section3
section4
section5
section6
section7
section8
section9
appendix
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

View File

@ -0,0 +1,618 @@
============================
Section One: Getting Started
============================
Who should read this book
-------------------------
This book has been written for software developers who wish to deploy
applications to OpenStack clouds.
We've assumed that you're an experienced programmer, but that you haven't
necessarily created an application for cloud in general, or for OpenStack in
particular.
If you're already familiar with OpenStack, you'll save time learning about the
general concepts, and you'll still find value in learning how to work
programmatically with its components.
What you will learn
-------------------
Deploying applications in a cloud environment can be very different from the
siloed approach you see in traditional IT, so in addition to learning
to deploy applications on OpenStack, you will also learn some best practices for
cloud application development. Overall, this guide covers the following:
* :doc:`/section1` - The most basic cloud application -- creating and destroying virtual resources
* :doc:`/section2` - The architecture of a sample cloud-based application
* :doc:`/section3` - The importance of message queues
* :doc:`/section4` - Scaling up and down in response to changes in application load
* :doc:`/section5` - Using object or block storage to create persistence
* :doc:`/section6` - Orchestrating your cloud for better control of the environment
* :doc:`/section7` - Networking choices and actions to help relieve potential congestion
* :doc:`/section8` - Advice for developers who may not have been exposed to operations tasks before
* :doc:`/section9` - Taking your application to the next level by spreading it across multiple regions or clouds
A general overview
------------------
This tutorial actually involves two applications; the first, a fractal
generator, simply uses mathematical equations to generate images. We'll provide
that application to you in its entirety, because really, it's just an excuse;
the real application we will be showing you is the code that enables you to make
use of OpenStack to run it. That application includes:
* Creating and destroying compute resources. (Those are the virtual machine instances on which the Fractals app runs.)
* Cloud-related architecture decisions, such as breaking individual functions out into microservices and modularizing them.
* Scaling up and down to customize the amount of available resources.
* Object and block storage for persistence of files and databases.
* Orchestration services to automatically adjust to the environment.
* Networking customization for better performance and segregation.
* A few other crazy things we think ordinary folks won't want to do ;).
Choosing your OpenStack SDK
---------------------------
Future versions of this book will cover completing these tasks with various
toolkits, such as the OpenStack SDK, and using various languages, such as Java
or Ruby. For now, however, this initial incarnation focuses on using Python with
Apache Libcloud. That said, if you're not a master Python programmer,
don't despair; the code is fairly straightforward, and should be readable to
anyone with a programming background.
If you're a developer for an alternate toolkit and would like to see this book
support it, great! Please feel free to submit alternate code snippets, or to
contact any of the authors or members of the Documentation team to coordinate.
Although this guide (initially) covers only Libcloud, you actually have several
choices when it comes to building an application for an OpenStack cloud.
These choices include:
============= ============= ================================================================= ====================================================
Language Name Description URL
============= ============= ================================================================= ====================================================
Python Libcloud A Python-based library managed by the Apache Foundation.
This library enables you to work with multiple types of clouds. https://libcloud.apache.org
Python OpenStack SDK A python-based libary specifically developed for OpenStack. https://github.com/stackforge/python-openstacksdk
Java jClouds A Java-based library. Like libcloud, it's also managed by the https://jclouds.apache.org
Apache Foundation and works with multiple types of clouds.
Ruby fog A Ruby-based SDK for multiple clouds. http://www.fogproject.org
node.js pkgcloud A Node.js-based SDK for multiple clouds. https://github.com/pkgcloud/pkgcloud
PHP php-opencloud A library for developers using PHP to work with OpenStack clouds. http://php-opencloud.com/
NET Framework OpenStack SDK A .NET based library that can be used to write C++ applications. https://www.nuget.org/packages/OpenStack-SDK-DotNet
for Microsoft
.NET
============= ============= ================================================================= ====================================================
A list of all available SDKs is available on the
`OpenStack wiki <https://wiki.openstack.org/wiki/SDKs>`_.
What you need
-------------
We assume you already have access to an OpenStack cloud.
You should have a project (tenant) with a quota of at least
6 instances. The Fractals application itself runs in Ubuntu, Debian, and Fedora-based and
openSUSE-based distributions, so you'll need to be creating instances using one
of these operating systems.
To interact with the cloud itself, you will also need to have
.. only:: dotnet
`OpenStack SDK for Microsoft .NET 0.9.1 or better installed <https://www.nuget.org/packages/OpenStack-SDK-DotNet>`_.
.. warning:: This document has not yet been completed for the .NET SDK
.. only:: fog
`fog 1.19 or better installed <http://www.fogproject.org/wiki/index.php?title=FOGUserGuide#Installing_FOG>`_ and working
with ruby gems 1.9
.. warning:: This document has not yet been completed for the fog SDK
.. only:: jclouds
`jClouds 1.8 or better installed <https://jclouds.apache.org/start/install>`_.
.. warning:: This document has not yet been completed for the jclouds SDK
.. only:: libcloud
`libcloud 0.15.1 or better installed <https://libcloud.apache.org/getting-started.html>`_.
.. only:: node
`a recent version of pkgcloud installed <https://github.com/pkgcloud/pkgcloud#getting-started>`_.
.. warning:: This document has not yet been completed for the pkgcloud SDK
.. only:: openstacksdk
the OpenStack SDK installed.
.. warning:: This document has not yet been completed for the OpenStack SDK
.. only:: phpopencloud
`a recent version of php-opencloud installed <http://docs.php-opencloud.com/en/latest/>`_.
.. warning:: This document has not yet been completed for the php-opencloud SDK
You will need the following 5 pieces of information, which you can obtain from
your cloud provider:
* auth URL
* username
* password
* project id or name (Projects are also known as tenants.)
* cloud region
You can also get this information by downloading the OpenStack RC file from the
OpenStack Dashboard. To download this file, log into the Horizon dashboard and
click Project->Access & Security->API Access->Download OpenStack RC file.
If you choose this route, be aware that the "auth URL" doesn't include the path.
In other words, if your openrc.sh file shows:
.. code-block:: bash
export OS_AUTH_URL=http://controller:5000/v2.0
the actual auth URL will be
.. code-block:: python
http://controller:5000
How you'll interact with OpenStack
----------------------------------
Throughout this tutorial, you'll be interacting with your OpenStack cloud
through code, using one of the SDKs listed in section "Choosing your OpenStack
SDK". In this initial version, the code snippets assume that you're using
libcloud.
.. only:: fog
.. literalinclude:: ../../samples/fog/section1.rb
:start-after: step-1
:end-before: step-2
.. only:: libcloud
To try it out, add the following code to a Python script (or use an
interactive Python shell) by calling :code:`python -i`.
.. literalinclude:: ../../samples/libcloud/section1.py
:start-after: step-1
:end-before: step-2
.. only:: openstacksdk
.. code-block:: python
from openstack import connection
conn = connection.Connection(auth_url="http://controller:5000/v3",
user_name="your_auth_username", password="your_auth_password", ...)
.. Note:: We'll use the :code:`conn` object throughout the tutorial, so ensure you always have one handy.
.. only:: libcloud
.. Note:: If you receive the exception :code:`libcloud.common.types.InvalidCredsError: 'Invalid credentials with the provider'` while
trying to run one of the following API calls please double-check your credentials.
.. Note:: If your provider says they do not use regions, try a blank string ('') for the region_name.
Flavors and Images
------------------
In order to run your application, the first thing you'll need to do is create a
virtual machine, or launch an instance. This instance behaves (for all intents
and purposes) as a normal server.
In order to launch an instance, you will need to choose a flavor and an image.
The flavor is essentially the size of the instance, such as its number of CPUs,
amount of RAM and disk. An image is a prepared OS installation from which your
instance is cloned. Keep in mind when booting instances that larger flavors can
be more expensive (in terms of resources, and therefore monetary cost, if you're
working in a public cloud) than smaller ones.
You can easily find out the images available in your cloud by
running some API calls:
.. only:: fog
.. literalinclude:: ../../samples/fog/section1.rb
:start-after: step-2
:end-before: step-3
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section1.py
:start-after: step-2
:end-before: step-3
You should see a result something like:
.. code-block:: python
<NodeImage: id=2cccbea0-cea9-4f86-a3ed-065c652adda5, name=ubuntu-14.04, driver=OpenStack ...>
<NodeImage: id=f2a8dadc-7c7b-498f-996a-b5272c715e55, name=cirros-0.3.3-x86_64, driver=OpenStack ...>
You can also get information on the various flavors:
.. only:: fog
.. literalinclude:: ../../samples/fog/section1.rb
:start-after: step-3
:end-before: step-4
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section1.py
:start-after: step-3
:end-before: step-4
This code should produce output something like:
.. code-block:: python
<OpenStackNodeSize: id=1, name=m1.tiny, ram=512, disk=1, bandwidth=None, price=0.0, driver=OpenStack, vcpus=1, ...>
<OpenStackNodeSize: id=2, name=m1.small, ram=2048, disk=20, bandwidth=None, price=0.0, driver=OpenStack, vcpus=1, ...>
<OpenStackNodeSize: id=3, name=m1.medium, ram=4096, disk=40, bandwidth=None, price=0.0, driver=OpenStack, vcpus=2, ...>
<OpenStackNodeSize: id=4, name=m1.large, ram=8192, disk=80, bandwidth=None, price=0.0, driver=OpenStack, vcpus=4, ...>
<OpenStackNodeSize: id=5, name=m1.xlarge, ram=16384, disk=160, bandwidth=None, price=0.0, driver=OpenStack, vcpus=8, ...>
Your images and flavors will be different, of course.
Choose an image and flavor to use for your first instance. To start with, we
only need about 1GB of RAM, 1 CPU and a GB of disk, so in this example, the
:code:`m1.small` flavor, which exceeds these requirements, in conjunction with
the Ubuntu image, is a safe choice.
The flavor and image you choose here will be used throughout this guide, so you
will need to change the IDs in the following tutorial sections to correspond to
your desired flavor and image.
If you don't see the image you want available in your cloud, you can usually
upload a new one - depending on your cloud's policy settings. There is a guide
on how to acquire images
`available here <http://docs.openstack.org/image-guide/content/ch_obtaining_images.html>`_.
Set the image and size variables to appropriate values for your cloud. We'll use
these in later sections.
First tell the connection to retrieve a specific image, using the ID of the
image you have chosen to work with in the previous section:
.. only:: fog
.. literalinclude:: ../../samples/fog/section1.rb
:start-after: step-4
:end-before: step-5
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section1.py
:start-after: step-4
:end-before: step-5
You should see output something like this:
.. code-block:: python
<NodeImage: id=2cccbea0-cea9-4f86-a3ed-065c652adda5, name=ubuntu-14.04, driver=OpenStack ...>
Next tell the script what flavor you want to use:
.. only:: fog
.. literalinclude:: ../../samples/fog/section1.rb
:start-after: step-5
:end-before: step-6
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section1.py
:start-after: step-5
:end-before: step-6
You should see output something like this:
.. code-block:: python
<OpenStackNodeSize: id=3, name=m1.medium, ram=4096, disk=40, bandwidth=None, price=0.0, driver=OpenStack, vcpus=2, ...>
Now you're ready to actually launch the instance.
Booting an instance
-------------------
Now that you have selected an image and flavor, use it to create an instance.
.. only:: libcloud
.. note:: The following instance creation assumes that you only have one
tenant network. If you have multiple tenant networks defined, you will
need to add a networks parameter to the create_node call. You'll know
this is the case if you see an error stating 'Exception: 400 Bad Request
Multiple possible networks found, use a Network ID to be more specific.'
See :doc:`/appendix` for details.
Start by creating the instance.
.. note:: An instance may be called a 'node' or 'server' by your SDK.
.. only:: fog
.. literalinclude:: ../../samples/fog/section1.rb
:start-after: step-6
:end-before: step-7
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section1.py
:start-after: step-6
:end-before: step-7
You should see output something like:
.. code-block:: python
<Node: uuid=1242d56cac5bcd4c110c60d57ccdbff086515133, name=testing, state=PENDING, public_ips=[], private_ips=[], provider=OpenStack ...>
.. only:: openstacksdk
.. code-block:: python
args = {
"name": "testing",
"flavorRef": flavor,
"imageRef": image,
}
instance = conn.compute.create_server(**args)
If you then output a list of existing instances...
.. only:: fog
.. literalinclude:: ../../samples/fog/section1.rb
:start-after: step-7
:end-before: step-8
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section1.py
:start-after: step-7
:end-before: step-8
... you should see the new instance appear.
.. only:: libcloud
.. code-block:: python
<Node: uuid=1242d56cac5bcd4c110c60d57ccdbff086515133, name=testing, state=RUNNING, public_ips=[], private_ips=[], provider=OpenStack ...>
.. only:: openstacksdk
.. code-block:: python
instances = conn.compute.list_servers()
for instance in instances:
print(instance)
Before we move on, there's one more thing you need to do.
Destroying an instance
----------------------
It is important to keep in mind that cloud resources (including running
instances you are no longer using) can cost money. Learning to remove cloud
resources will help you avoid any unexpected costs incurred by unnecessary
cloud resources.
.. only:: fog
.. literalinclude:: ../../samples/fog/section1.rb
:start-after: step-8
:end-before: step-9
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section1.py
:start-after: step-8
:end-before: step-9
If you then list the instances again, you'll see that the instance no longer appears.
Leave your shell open, as you will use it for another instance deployment in this section.
Deploy the application to a new instance
----------------------------------------
Now that you are familiar with how to create and destroy instances, it is time
to deploy the sample application. The instance you create for the app will be
similar to the first instance you created, but this time, we'll briefly
introduce a few extra concepts.
.. note:: Internet connectivity from your cloud instance is required to download the application.
When you create an instance for the application, you're going to want to give it
a bit more information than the bare instance we created and destroyed a little
while ago. We'll go into more detail in later sections, but for now, simply create
these resources so you can feed them to the instance:
* A key pair. In order to access your instance, you will need to import an SSH
public key into OpenStack to create a key pair. This key pair will be installed
on the new instance by OpenStack. Typically, your public key is written to
:code:`.ssh/id_rsa.pub`. If you do not have an SSH public key file, follow
the instructions `here <https://help.github.com/articles/generating-ssh-keys/>`_
first. We'll cover this in depth in section 2.
.. only:: fog
.. warning:: This section has not been completed
.. only:: libcloud
In the following example, :code:`pub_key_file` should be set to the location
of your public SSH key file.
.. literalinclude:: ../../samples/libcloud/section1.py
:start-after: step-9
:end-before: step-10
::
<KeyPair name=demokey fingerprint=aa:bb:cc... driver=OpenStack>
* Network access. By default, OpenStack will filter all traffic. You'll need to
create a security group that will allow HTTP and SSH access and apply it to
your instance. We'll go into more detail in section 2.
.. only:: fog
.. literalinclude:: ../../samples/fog/section1.rb
:start-after: step-10
:end-before: step-11
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section1.py
:start-after: step-10
:end-before: step-11
* Userdata. During instance creation, userdata may be provided to OpenStack in
order to configure instances after they boot. The userdata is applied to an
instance by the cloud-init service. This service should be pre-installed on
the image you have chosen. We'll go into more detail in section 2.
.. only:: fog
.. warning:: This section has not been completed
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section1.py
:start-after: step-11
:end-before: step-12
Now you're ready to boot and configure the new instance.
Booting and configuring an instance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Use the image, flavor, key pair, and userdata to create a new instance. After
requesting the new instance, wait for it to finish.
.. only:: fog
.. warning:: This section has not been completed
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section1.py
:start-after: step-12
:end-before: step-13
When the instance boots up, the information in the ex_userdata variable tells it
to go ahead and deploy the Fractals app.
Associating a Floating IP for external connectivity
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We'll cover networking in greater detail in section 7, but in order to actually
see the application running, you'll need to know where to look for it. Your
instance will have outbound network access by default, but in order to provision
inbound network access (in other words, to make it reachable from the Internet)
you will need an IP address. In some cases, your instance may be provisioned
with a publicly routable IP by default. You'll be able to tell in this case
because when you list the instances you'll see an IP address listed under
public_ips or private_ips.
If not, then you'll need to create a floating IP and attach it to your instance.
.. only:: fog
.. warning:: This section has not been completed
.. only:: libcloud
Use :code:`ex_list_floating_ip_pools()` and select the first pool of
Floating IP addresses. Allocate this to your project and attach it
to your instance.
.. literalinclude:: ../../samples/libcloud/section1.py
:start-after: step-13
:end-before: step-14
.. todo:: remove extra blank line after break
You should see the Floating IP output to the command line:
::
<OpenStack_1_1_FloatingIpAddress: id=4536ed1e-4374-4d7f-b02c-c3be2cb09b67, ip_addr=203.0.113.101, pool=<OpenStack_1_1_FloatingIpPool: name=floating001>, driver=<libcloud.compute.drivers.openstack.OpenStack_1_1_NodeDriver object at 0x1310b50>>
You can then go ahead and attach it to the instance:
.. literalinclude:: ../../samples/libcloud/section1.py
:start-after: step-14
:end-before: step-15
Now go ahead and run the script to start the deployment.
Accessing the application
~~~~~~~~~~~~~~~~~~~~~~~~~
Deploying application data and configuration to the instance can take some time. Consider
enjoying a cup of coffee while you wait. After the application has been deployed, you will be able to
visit the awesome graphic interface at the link provided below using your
preferred browser.
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section1.py
:start-after: step-15
.. note:: If you are not using floating IPs, substitute another IP address as appropriate
.. figure:: images/screenshot_webinterface.png
:width: 800px
:align: center
:height: 600px
:alt: screenshot of the webinterface
:figclass: align-center
Next Steps
----------
Don't worry if you don't understand every part of what just happened. As we move
on to :doc:`/section2`, we'll go into these concepts in more detail.
* :doc:`/section3` - to learn how to scale the application further
* :doc:`/section4` - to learn how to make your application more durable using Object Storage
* :doc:`/section5` - to migrate the database to block storage, or use the database-as-a-service component
* :doc:`/section6` - to automatically orchestrate the application
* :doc:`/section7` - to learn about more complex networking
* :doc:`/section8` - for advice for developers new to operations
* :doc:`/section9` - to see all the crazy things we think ordinary folks won't want to do ;)
Full example code
-----------------
Here's every code snippet in a single file, in case you want to run it all in one go, or
you are so experienced you don't need instruction ;) If you are going to use this,
don't forget to set your authentication information and the flavor and image ID.
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section1.py
:language: python

View File

@ -0,0 +1,511 @@
==================================================================
Section Two: Introduction to the Fractals Application Architecture
==================================================================
This tutorial works with a scalable cloud application that generates
`fractals <http://en.wikipedia.org/wiki/Fractal>`_ - beautiful images made
using only mathematics, like the image below.
.. figure:: images/fractal-example.png
:scale: 50%
:align: left
This section introduces the application architecture and explains how it was designed
to take advantage of cloud features in general, and OpenStack in particular.
It also provides explanations for some of the commands which were
referenced in the previous section.
.. todo:: (for Nick) Improve the architecture discussion.
.. only:: dotnet
.. warning:: This section has not yet been completed for the .NET SDK
.. only:: fog
.. warning:: This section has not yet been completed for the fog SDK
.. only:: jclouds
.. warning:: This section has not yet been completed for the jclouds SDK
.. only:: node
.. warning:: This section has not yet been completed for the pkgcloud SDK
.. only:: openstacksdk
.. warning:: This section has not yet been completed for the OpenStack SDK
.. only:: phpopencloud
.. warning:: This section has not yet been completed for the PHP-OpenCloud SDK
Cloud application architecture principles
-----------------------------------------
Cloud applications typically have several design principles in common.
Many of the Fractals application design decisions were motivated by these principles.
Modularity and Microservices
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
`Microservices <http://en.wikipedia.org/wiki/Microservices>`_ are an important design pattern used
to achieve application modularity. By separating logical application functions into separate
services, maintenance and re-use become simpler. Decoupling components from each other
also makes it easier to selectively scale individual components as required. Further, application modularity
is a required feature of applications which scale out well and are fault tolerant.
Scalability
~~~~~~~~~~~
Cloud applications often make use of a large number of small instances as opposed to a small number of
large instances. Provided that an application is sufficiently modular, microservices may be easily spread across
as many instances as required. This architecture enables an application to grow past the limit imposed by the maximum
size of an instance. It's like trying to move a large number of people from one place to another; there's only
so many people you can put on the largest bus, but you can use a virtually unlimited number of buses (or even small cars),
providing just as much capacity as you need - and no more.
Fault Tolerance
~~~~~~~~~~~~~~~
In cloud programming, there's a well-known analogy known as "cattle vs pets". If you haven't heard it before, it goes
like this:
When you're dealing with pets, you name them and care for them and if they get sick, you nurse them back to health.
Nursing pets back to health can be difficult and very time consuming. When you're dealing with cattle, you attach a
numbered tag to their ear and if they get sick you put them down and move on.
That, as it happens, is the new reality of programming. Applications and systems used to be created on large, expensive
servers, cared for by operations staff dedicated to keeping them healthy. If something went wrong with one of those
servers, the staff's job was to do whatever it took to make it right again and save the server and the application.
In cloud programming, it's very different. Rather than large, expensive servers, you're dealing with virtual
machines that are literally disposable; if something goes wrong, you shut it down and spin up a new one. There's
still operations staff, but rather than nursing individual servers back to health, their job is to monitor the
health of the overall system.
There are definite advantages to this architecture. It's easy to get a "new" server, without any of the issues
that inevitably arise when a server has been up and running for months, or even years.
As with classical infrastructure, failures of the underpinning cloud infrastructure (hardware, networks, and software) are unavoidable. When you're
designing for the cloud, it's crucial that your application is designed for an environment where failures
can happen at any moment. This may sound like a liability, but it's not; by designing your application with a high
degree of fault tolerance, you're also making it resilient in the face of change, and therefore more adaptable.
Fault tolerance is essential to the cloud-based application.
Automation
~~~~~~~~~~
If an application is meant to automatically scale up and down to meet demand, it is not feasible to have any manual
steps in the process of deploying any component of the application.
Automation also decreases the time to recovery for your application in the event of component failures, increasing
fault tolerance and resilience.
Programmatic Interfaces (APIs)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Like many cloud applications, the Fractals app has a `RESTful API <http://en.wikipedia.org/wiki/Representational_state_transfer>`_.
You can connect to it directly and generate fractals, or you can integrate it as a component of a larger app.
Any time a standard interface such as an API is available, automated testing becomes much more feasible,
increasing software quality.
Fractals app architecture
-------------------------
As you will see below, the Fractals app was designed with the principles of the previous subsection in mind.
You'll note that in :doc:`section1` we deployed the app in an all-in-one style, on a single virtual machine.
This isn't good practice, but as the app uses microservices to decouple logical application functions, we can
change this easily.
.. graphviz:: images/architecture.dot
Message queues are used to facilitate communication between the Fractal app
services. The Fractal app uses a so-called
`work queue <https://www.rabbitmq.com/tutorials/tutorial-two-python.html>`_ (or task queue) to distribute
tasks to the worker services.
Message queues work in a way similar to a queue (or a line, for those of us on the other side of the ocean) in a bank being
served by multiple clerks. The message queue in our application
provides a feed of work requests that can be taken one-at-a-time by worker services,
whether there is a single worker service or hundreds of them.
This is a `useful pattern <https://msdn.microsoft.com/en-us/library/dn568101.aspx>`_ for
many cloud applications that have long lists of requests coming in and a pool of resources
from which to service them. This also means that a worker may crash and the tasks will be
processed by other workers.
.. note:: The `RabbitMQ getting started tutorial <https://www.rabbitmq.com/getstarted.html>`_ provides a great introduction to message queues.
.. graphviz:: images/work_queue.dot
The worker service consumes messages from the work queue and then processes
them to create the corresponding fractal image file.
Of course there's also a web interface which offers a more human friendly
way of accessing the API to view the created fractal images, and a simple command line interface.
.. figure:: images/screenshot_webinterface.png
:width: 800px
:align: center
:height: 600px
:alt: screenshot of the webinterface
:figclass: align-center
There are also multiple storage backends (to store the generated fractal images) and a database
component (to store the state of tasks), but we'll talk about those in :doc:`/section4` and :doc:`/section5` respectively.
How the Fractals app interacts with OpenStack
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. todo:: Description of the components of OpenStack and how they relate to the Fractals app and how it runs on the cloud.
TF notes this is already covered in the guide, just split across each section. Adding it here will force the
introduction of block storage, object storage, orchestration and neutron networking too early,
which could seriously confuse users that don't have these services in their cloud. Therefore, this should not be
done here.
The Magic Revisited
-------------------
So what exactly was that request doing at the end of the previous section?
Let's look at it again. (Note that in this subsection, we're just explaining what
you've already done in the previous section; you don't need to execute these commands again.)
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section2.py
:start-after: step-1
:end-before: step-2
We explained image and flavor in :doc:`section1`, so in the following sections,
we will explain the other parameters in detail, including :code:`ex_userdata` (cloud-init) and
:code:`ex_keyname` (key pairs).
Introduction to cloud-init
~~~~~~~~~~~~~~~~~~~~~~~~~~
`cloud-init <https://cloudinit.readthedocs.org/en/latest/>`_ is a tool that performs instance configuration tasks during the boot of a cloud instance,
and comes installed on most cloud images. :code:`ex_userdata`, which was passed to :code:`create_node`, is the configuration data passed to cloud-init.
In this case, we are presenting a shell script as the `userdata <https://cloudinit.readthedocs.org/en/latest/topics/format.html#user-data-script>`_.
When :code:`create_node` creates the instance, :code:`cloud-init` executes the shell script in the :code:`userdata` variable.
When an SSH public key is provided during instance creation, cloud-init will install this key on a user account. (The username varies between
cloud images.) See the `Obtaining Images <http://docs.openstack.org/image-guide/content/ch_obtaining_images.html>`_ section of the image guide
for some guidance on which username you should use when SSHing. If you still have problems logging in, ask your cloud provider to confirm the username.
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section2.py
:start-after: step-2
:end-before: step-3
Once the instance is created, cloud-init downloads and executes a script called :code:`install.sh`.
This script installs the Fractals app. Cloud-init is capable
of consuming a number of different types of data, not just bash scripts.
You can even provide multiple types of data. You can find further information about
cloud-init in the
`official documentation <https://cloudinit.readthedocs.org/en/latest/>`_.
Introduction to key pairs
~~~~~~~~~~~~~~~~~~~~~~~~~
As you might imagine, security is important when it comes to your instances; you can't
have just anyone accessing them. In order to enable logging into an instance, you need to provide
the public key of an SSH key pair during instance creation. In section one,
you made sure that you had a key pair and uploaded it to OpenStack, and cloud-init installed
it for the user account.
Even with a key in place, however, you'll need to have the appropriate security group rules in place to access your instance.
Introduction to security groups
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Security groups are sets of network access rules that are applied to an instance's networking.
By default, only egress (outbound) traffic is allowed. You must explicitly enable ingress (inbound) network access by
creating a security group rule.
.. warning:: Removing the egress rule created by OpenStack will cause your instance
networking to break.
Start by creating a security group for the all-in-one instance and adding the appropriate rules, such as HTTP (TCP port 80) and SSH (TCP port 22):
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section2.py
:start-after: step-3
:end-before: step-4
.. note:: :code:`ex_create_security_group_rule()` takes ranges of ports as input. This is why ports 80 and 22 are passed twice.
You can list available security groups with:
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section2.py
:start-after: step-4
:end-before: step-5
Once you've created a rule or group, you can also delete it:
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section2.py
:start-after: step-5
:end-before: step-6
To see which security groups apply to an instance, you can:
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section2.py
:start-after: step-6
:end-before: step-7
.. todo:: print() ?
Once you've configured permissions, you'll need to know where to access the application.
Introduction to Floating IPs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As in traditional IT, cloud instances are accessed via IP addresses. Rather than static IPs, however, these IP addresses are
assigned programmatically by OpenStack. How this is actually done depends on the networking setup for your cloud. In
some cases, you will simply get an Internet routable IP address assigned directly to your instance.
The most common way for OpenStack clouds to allocate Internet routable IP addresses to instances, however, is through the use of Floating IPs.
A Floating IP is an address that exists as an entity unto itself, and can be associated to a specific instance network interface.
When a Floating IP address is associated to an instance network interface, OpenStack re-directs traffic bound for that address to
the address of the instance's internal network interface address. Your cloud provider will generally offer pools of floating IPs for your use.
To use a Floating IP, you must first allocate an IP to your project, then associate it to your instance's network interface.
.. note::
Allocating a Floating IP address to an instance does not change the IP address of the instance,
it causes OpenStack to establish the network translation rules to allow an *additional* IP address.
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section2.py
:start-after: step-7
:end-before: step-8
If you have no free Floating IPs that have been previously allocated for your project, first select a Floating IP pool offered by your provider.
In this example, we have selected the first one and assume that it has available IP addresses.
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section2.py
:start-after: step-8
:end-before: step-9
Now request that an address from this pool be allocated to your project.
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section2.py
:start-after: step-9
:end-before: step-10
Now that you have an unused floating IP address allocated to your project, attach it to an instance.
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section2.py
:start-after: step-10
:end-before: step-11
That brings us to where we ended up at the end of :doc:`/section1`. But where do we go from here?
Splitting services across multiple instances
--------------------------------------------
We've talked about separating functions into different microservices, and how that
enables us to make use of the cloud architecture. Now let's see that in action.
The rest of this tutorial won't reference the all-in-one instance you created in section one.
Take a moment to delete this instance.
It's easy to split out services into multiple instances. We will create a controller instance called :code:`app-controller`,
which hosts the API, database, and messaging services. We'll also create a worker instance called :code:`app-worker-1`, which just generates fractals.
The first step is to start the controller instance. The instance has the API service, the database, and the messaging service,
as you can see from the parameters passed to the installation script.
========== ====================== =============================
Parameter Description Values
========== ====================== =============================
:code:`-i` Install a service :code:`messaging` (install RabbitMQ) and :code:`faafo` (install the Faafo app).
:code:`-r` Enable/start something :code:`api` (enable and start the API service), :code:`worker` (enable and start the worker service), and :code:`demo` (run the demo mode to request random fractals).
========== ====================== =============================
.. todo:: https://bugs.launchpad.net/openstack-manuals/+bug/1439918
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section2.py
:start-after: step-11
:end-before: step-12
Note that this time, when you create a security group, you're including a rule that only applies
for instances that are part of the worker_group.
Next, start a second instance, which will be the worker instance:
.. todo :: more text necessary here...
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section2.py
:start-after: step-12
:end-before: step-13
Notice that you've added this instance to the worker_group, so it can access the controller.
As you can see from the parameters passed to the installation script, you are specifying that this is the worker instance, but you're also passing the address of the API instance and the message
queue so the worker can pick up requests. The Fractals app installation script can take several parameters.
========== ==================================================== ====================================
Parameter Description Example
========== ==================================================== ====================================
:code:`-e` The endpoint URL of the API service. http://localhost/
:code:`-m` The transport URL of the messaging service. amqp://guest:guest@localhost:5672/
:code:`-d` The connection URL for the database (not used here). sqlite:////tmp/sqlite.db
========== ==================================================== ====================================
Now if you make a request for a new fractal, you connect to the controller instance, :code:`app-controller`, but the
work will actually be performed by a separate worker instance - :code:`app-worker-1`.
Login with SSH and use the Fractal app
--------------------------------------
Login to the worker instance, :code:`app-worker-1`, with SSH, using the previously added SSH key pair "demokey". Start
by getting the IP address of the worker:
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section2.py
:start-after: step-13
:end-before: step-14
Now you can SSH into the instance:
::
$ ssh -i ~/.ssh/id_rsa USERNAME@IP_WORKER_1
.. note:: Replace :code:`IP_WORKER_1` with the IP address of the worker instance and USERNAME with the appropriate username.
Once you've logged in, check to see whether the worker service process is running as expected.
You can find the logs of the worker service in the directory :code:`/var/log/supervisor/`.
::
worker # ps ax | grep faafo-worker
17210 ? R 7:09 /usr/bin/python /usr/local/bin/faafo-worker
Open :code:`top` to monitor the CPU usage of the :code:`faafo-worker` process.
Now log into the controller instance, :code:`app-controller`, also with SSH, using the previously added SSH key pair "demokey".
::
$ ssh -i ~/.ssh/id_rsa USERNAME@IP_CONTROLLER
.. note:: Replace :code:`IP_CONTROLLER` with the IP address of the controller instance and USERNAME with the appropriate username.
Check to see whether the API service process is running like expected. You can find the logs for the API service
in the directory :code:`/var/log/supervisor/`.
::
controller # ps ax | grep faafo-api
17209 ? Sl 0:19 /usr/bin/python /usr/local/bin/faafo-api
Now call the Fractal app's command line interface (:code:`faafo`) to request a few new fractals.
The following command will request a few fractals with random parameters:
::
controller # faafo --endpoint-url http://localhost --verbose create
2015-04-02 03:55:02.708 19029 INFO faafo.client [-] generating 6 task(s)
Watch :code:`top` on the worker instance. Right after calling :code:`faafo` the :code:`faafo-worker` process should start consuming a lot of CPU cycles.
::
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
17210 root 20 0 157216 39312 5716 R 98.8 3.9 12:02.15 faafo-worker
To show the details of a specific fractal use the subcommand :code:`show` of the Faafo CLI.
::
controller # faafo show 154c7b41-108e-4696-a059-1bde9bf03d0a
+------------+------------------------------------------------------------------+
| Parameter | Value |
+------------+------------------------------------------------------------------+
| uuid | 154c7b41-108e-4696-a059-1bde9bf03d0a |
| duration | 4.163147 seconds |
| dimensions | 649 x 869 pixels |
| iterations | 362 |
| xa | -1.77488588389 |
| xb | 3.08249829401 |
| ya | -1.31213919301 |
| yb | 1.95281690897 |
| size | 71585 bytes |
| checksum | 103c056f709b86f5487a24dd977d3ab88fe093791f4f6b6d1c8924d122031902 |
+------------+------------------------------------------------------------------+
There are more commands available; find out more details about them with :code:`faafo get --help`, :code:`faafo list --help`, and :code:`faafo delete --help`.
.. note:: The application stores the generated fractal images directly in the database used by the API service instance.
          Storing image files in a database is not good practice. We're doing it here as an example only as an easy
way to allow multiple instances to have access to the data. For best practice, we recommend storing
objects in Object Storage, which is covered in :doc:`section4`.
Next Steps
----------
You should now have a basic understanding of the architecture of cloud-based applications. In addition,
you now have had practice starting new instances, automatically configuring them at boot, and
even modularizing an application so that you may use multiple instances to run it. These are the basic
steps for requesting and using compute resources in order to run your application on an OpenStack cloud.
From here, you should go to :doc:`/section3` to learn how to scale the application further. Alternately, you may
jump to any of these sections:
* :doc:`/section4` - to learn how to make your application more durable using Object Storage
* :doc:`/section5` - to migrate the database to block storage, or use the database-as-a-service component
* :doc:`/section6` - to automatically orchestrate the application
* :doc:`/section7` - to learn about more complex networking
* :doc:`/section8` - for advice for developers new to operations
Full example code
-----------------
Here's every code snippet into a single file, in case you want to run it all in one, or
you are so experienced you don't need instruction ;) If you are going to use this,
don't forget to set your authentication information and the flavor and image ID.
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section2.py
:language: python

View File

@ -0,0 +1,348 @@
==========================
Section Three: Scaling Out
==========================
.. todo:: For later versions of this guide: implement a service within the fractals app
that simply returns the CPU load on the local server. Then add to this section
a simple loop that checks to see if any servers are overloaded and adds a new
one if they are. (Or do this via SSH and w)
One of the most-often cited reasons for designing applications using cloud patterns is
the ability to **scale out**. That is: to add additional resources as required. This is in
contrast to the previous mentality of increasing capacity by scaling the size of existing resources up.
In order for scale out to be feasible, you'll need to do two things:
* Architect your application to make use of additional resources.
* Make it possible to add new resources to your application.
.. todo:: nickchase needs to restate the second point
In section 2, we talked about various aspects of the application architecture, such
as building in a modular fashion, creating an API, and so on. Now you'll see why
those are so important. By creating a modular application with decoupled services,
it is possible to identify components that cause application performance bottlenecks
and scale them out.
Just as importantly, you can also remove resources when they are no longer necessary.
It is very difficult to overstate the cost savings that this feature can bring, as
compared to traditional infrastructure.
Of course, just having access to additional resources is only part of the battle;
while it's certainly possible to manually add or destroy resources, you'll get more
value -- and more responsiveness -- if the application simply requests new resources
automatically when it needs them.
This section continues to illustrate the separation of services onto multiple instances
and highlights some of the choices we've made that facilitate scalability in
the app's architecture.
We'll progressively ramp up to use up to about 6 instances, so ensure
that your cloud account has appropriate quota to handle that many.
In the previous section, we used two virtual machines - one 'control' service and one 'worker'.
In our application, the speed at which fractals can be generated depends on the number of workers.
With just one worker, we can only produce one fractal at a time. Before long, it will be clear
that we need more resources.
.. note:: If you don't have a working application, follow the steps in :doc:`section2` to create one.
.. todo:: ensure we have the controller_ip even if this is a new python session.
Generate load
-------------
You can test for yourself what happens when the Fractals app is under load by
* maxing out the CPU of the existing worker instances (loading the worker)
* generating a lot of API requests (load up the API)
Generate a lot of worker load
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Use SSH to login to the controller instance, :code:`app-controller`, using the previously added SSH keypair.
::
$ ssh -i ~/.ssh/id_rsa USERNAME@IP_CONTROLLER
.. note:: Replace :code:`IP_CONTROLLER` with the IP address of the controller instance and USERNAME with the appropriate username.
Call the Fractal app's command line interface (:code:`faafo`) to request the generation of 5 large fractals.
::
$ faafo create --height 9999 --width 9999 --tasks 5
Now if you check the load on the worker, you can see that the instance is not doing well.
On our single CPU flavor instance, a load average of more than 1 means we are at capacity.
::
$ ssh -i ~/.ssh/id_rsa USERNAME@IP_WORKER uptime
10:37:39 up 1:44, 2 users, load average: 1.24, 1.40, 1.36
.. note:: Replace :code:`IP_WORKER` with the IP address of the worker instance and USERNAME with the appropriate username.
Generate a lot of API load
~~~~~~~~~~~~~~~~~~~~~~~~~~
API load is a slightly different problem to the previous one regarding capacity to work. We can
simulate many requests to the API as follows:
Use SSH to login to the controller instance, :code:`app-controller`, using the previously added SSH keypair.
::
$ ssh -i ~/.ssh/id_rsa USERNAME@IP_CONTROLLER
.. note:: Replace :code:`IP_CONTROLLER` with the IP address of the controller instance and USERNAME with the appropriate username.
Call the Fractal app's command line interface (:code:`faafo`) in a for loop to
send many requests to the API. The following command will request a random set of fractals,
500 times:
::
    $ for i in $(seq 1 500); do faafo --endpoint-url http://IP_CONTROLLER create & done
.. note:: Replace :code:`IP_CONTROLLER` with the IP address of the controller instance.
Now if you check the load on the API service instance, :code:`app-controller`, you can see that the instance is not doing well.
On our single CPU flavor instance, a load average of more than 1 means we are at capacity.
::
$ uptime
10:37:39 up 1:44, 2 users, load average: 1.24, 1.40, 1.36
The number of requests coming in means that some requests for fractals may not even get
onto the message queue to be processed. To ensure we can cope with demand,
we need to scale out our API services as well.
As you can see, we need to scale out the Fractals application's API capability.
Scaling out
-----------
Remove the old App
~~~~~~~~~~~~~~~~~~
Go ahead and delete the existing instances and security groups you created in previous sections.
Remember; when components in the cloud aren't doing what you want them to do, just remove them and
re-create something new.
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section3.py
:start-after: step-1
:end-before: step-2
Extra Security Groups
~~~~~~~~~~~~~~~~~~~~~
As you change the topology of your applications, you will need to update or create new security
groups. Here, we will re-create the required security groups.
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section3.py
:start-after: step-2
:end-before: step-3
A Floating IP Helper Function
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Define a short function to locate an unused floating IP, or to allocate a new one. This saves a few lines of boring code
and prevents you from reaching your Floating IP quota too quickly.
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section3.py
:start-after: step-3
:end-before: step-4
Splitting off the Database and Message Queue
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Prior to scaling out our application services like the API service or the workers
we have to add a central database and messaging instance, called :code:`app-services`,
that will be used to track the state of the fractals and to coordinate the communication between the services.
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section3.py
:start-after: step-4
:end-before: step-5
Scaling the API Service
~~~~~~~~~~~~~~~~~~~~~~~
With multiple workers producing fractals as fast as they can, we also need to make sure we
can receive the requests for fractals as quickly as possible. If our application
becomes popular, we may have many thousands of users trying to connect to our API to generate fractals.
Armed with our security group, image and flavor size we can now add multiple API services:
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section3.py
:start-after: step-5
:end-before: step-6
These are client-facing services, so unlike the workers they do not use a message queue
to distribute tasks. Instead, we'll need to introduce some kind of load balancing mechanism
to share incoming requests between the different API services.
One simple way might be to give half of our friends one address and half the other, but that's certainly
not a sustainable solution. Instead, we can do that automatically using a `DNS round robin <http://en.wikipedia.org/wiki/Round-robin_DNS>`_.
However, OpenStack networking can provide Load Balancing as a Service, which we'll explain in :doc:`/section7`.
.. todo:: Add a note that we demonstrate this by using the first API instance for the workers and the second API instance for the load simulation.
Scaling the workers
~~~~~~~~~~~~~~~~~~~
To increase the overall capacity, we will now add 3 workers:
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section3.py
:start-after: step-6
:end-before: step-7
Adding this capacity enables you to deal with a higher number of requests for fractals.
As soon as these worker instances come up, they'll start checking the message queue looking
for requests, reducing the overall backlog like a new register opening in the supermarket.
This was obviously a very manual process - figuring out we needed more workers and then
starting new ones required some effort. Ideally the system would do this itself. If your
application has been built to detect these situations, you can have it automatically request
and remove resources, but you don't actually need to do this work yourself. Instead, the
OpenStack Orchestration service can monitor load and start instances as appropriate.
See :doc:`section6` to find out how to set that up.
Verifying we've had an impact
-----------------------------
In the steps above, we've split out several services and expanded capacity. SSH to one of the
app instances and create a few fractals. You will see that the Fractals app has a few new features.
::
$ ssh -i ~/.ssh/id_rsa USERNAME@IP_API_1
.. note:: Replace :code:`IP_API_1` with the IP address of the first API instance and USERNAME with the appropriate username.
Use the Fractal app's command line interface to generate fractals :code:`faafo create`.
Watch the progress of fractal generation with :code:`faafo list`. Use :code:`faafo show UUID`
to examine some of the fractals. The generated_by field will show which worker
created the fractal. The fact that multiple worker instances are sharing the work means
that fractals will be generated more quickly and the death of a worker probably won't even
be noticed.
::
root@app-api-1:/var/log/supervisor# faafo list
+--------------------------------------+------------------+-------------+
| UUID | Dimensions | Filesize |
+--------------------------------------+------------------+-------------+
| 410bca6e-baa7-4d82-9ec0-78e409db7ade | 295 x 738 pixels | 26283 bytes |
| 66054419-f721-492f-8964-a5c9291d0524 | 904 x 860 pixels | 78666 bytes |
| d123e9c1-3934-4ffd-8b09-0032ca2b6564 | 952 x 382 pixels | 34239 bytes |
| f51af10a-084d-4314-876a-6d0b9ea9e735 | 877 x 708 pixels | 93679 bytes |
+--------------------------------------+------------------+-------------+
root@app-api-1:# faafo show d123e9c1-3934-4ffd-8b09-0032ca2b6564
+--------------+------------------------------------------------------------------+
| Parameter | Value |
+--------------+------------------------------------------------------------------+
| uuid | d123e9c1-3934-4ffd-8b09-0032ca2b6564 |
| duration | 1.671410 seconds |
| dimensions | 952 x 382 pixels |
| iterations | 168 |
| xa | -2.61217 |
| xb | 3.98459 |
| ya | -1.89725 |
| yb | 2.36849 |
| size | 34239 bytes |
| checksum | d2025a9cf60faca1aada854d4cac900041c6fa762460f86ab39f42ccfe305ffe |
| generated_by | app-worker-2 |
+--------------+------------------------------------------------------------------+
root@app-api-1:# faafo show 66054419-f721-492f-8964-a5c9291d0524
+--------------+------------------------------------------------------------------+
| Parameter | Value |
+--------------+------------------------------------------------------------------+
| uuid | 66054419-f721-492f-8964-a5c9291d0524 |
| duration | 5.293870 seconds |
| dimensions | 904 x 860 pixels |
| iterations | 348 |
| xa | -2.74108 |
| xb | 1.85912 |
| ya | -2.36827 |
| yb | 2.7832 |
| size | 78666 bytes |
| checksum | 1f313aaa36b0f616b5c91bdf5a9dc54f81ff32488ce3999f87a39a3b23cf1b14 |
| generated_by | app-worker-1 |
+--------------+------------------------------------------------------------------+
The fractals are now available from any of the app-api hosts. Visit
http://IP_API_1/fractal/FRACTAL_UUID and http://IP_API_2/fractal/FRACTAL_UUID to verify. Now you have multiple
redundant web services. If one dies, the others can be used.
.. note:: Replace :code:`IP_API_1` and :code:`IP_API_2` with the corresponding Floating IPs. Replace FRACTAL_UUID with
the UUID of an existing fractal.
Go ahead and test the fault tolerance. Start killing workers and API instances. As long as you have one of each, your application
should be fine. There is one weak point though. The database contains the fractals and fractal metadata. If you lose that instance,
the application will stop. Future sections will work to address this weak point.
If we had a load balancer, we could distribute this load between the two different API
services. As mentioned previously, there are several options. We will show one in :doc:`section7`.
You could in theory use a simple script to monitor the load
on your workers and API services and trigger the creation of new instances, which
you already know how to do. If you can see how to do that - congratulations, you're ready
to create scalable cloud applications.
Of course, creating a monitoring system just for one application may not always be
the best way. We recommend you look at :doc:`section6` to find out about how you
can use OpenStack Orchestration's monitoring and autoscaling capabilities to do
steps like this automatically.
Next Steps
----------
You should now be fairly confident about starting new instances, and about segregating the services of an application between them.
As mentioned in :doc:`/section2` the generated fractals images will be saved on the local filesystem of the API service instances. Because we now have multiple API
instances up and running the generated fractal images will be spread across multiple API services, stored on local instance filesystems. This results in a lot of
:code:`IOError: [Errno 2] No such file or directory` exceptions when trying to download a fractal image from an API service instance not holding the fractal
image on its local filesystem.
From here, you should go to :doc:`/section4` to learn how to use Object Storage to solve this problem in an elegant way. Alternately, you may jump to any of these sections:
* :doc:`/section5` - to migrate the database to block storage, or use the database-as-a-service component
* :doc:`/section6` - to automatically orchestrate the application
* :doc:`/section7` - to learn about more complex networking
* :doc:`/section8` - for advice for developers new to operations
Full example code
-----------------
Here's every code snippet into a single file, in case you want to run it all in one, or
you are so experienced you don't need instruction ;) If you are going to use this,
don't forget to set your authentication information and the flavor and image ID.
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section3.py
:language: python

View File

@ -0,0 +1,303 @@
===============================
Section Four: Making it Durable
===============================
.. todo:: https://github.com/apache/libcloud/pull/492
.. todo:: For later versions of the guide: Extend the Fractals app to use Swift directly, and show the actual code from there.
.. todo:: Explain how to get objects back out again.
.. todo:: Large object support in Swift http://docs.openstack.org/developer/swift/overview_large_objects.html
This section introduces object storage.
`OpenStack Object Storage <http://www.openstack.org/software/openstack-storage/>`_
(code-named Swift) is open source software for creating redundant, scalable data storage
using clusters of standardized servers to store petabytes of accessible data.
It is a long-term storage system for large amounts of static data that can be
retrieved, leveraged, and updated. Access is via an API, not through a file-system
like more traditional storage.
There are two key concepts to understand in the Object Storage API. The Object
Storage API is organized around two types of entities:
* Objects
* Containers
Similar to the Unix programming model, an Object is a "bag of bytes" that contains data,
such as documents and images. Containers are used to group objects.
You can make many objects inside a container, and have many containers inside your account.
If you think about how you traditionally make what you store durable, very quickly you should come
to the conclusion that keeping multiple copies of your objects on separate systems is a good way
to do that. However, keeping track of multiple copies of objects is a pain, and building that
into an app requires a lot of logic. OpenStack Object Storage does this automatically for you
behind-the-scenes - replicating each object at least twice before returning 'write success' to your
API call. It will always work to ensure that there are three copies of your objects (by default)
at all times - replicating them around the system in case of hardware failure, maintenance, network
outage or any other kind of breakage. This is very convenient for app creation - you can just dump
objects into object storage and not have to care about any of this additional work to keep them safe.
Using Object Storage to store fractals
--------------------------------------
The Fractals app currently uses the local filesystem on the instance to store the images it
generates. This is not scalable or durable, for a number of reasons.
Because the local filesystem is ephemeral storage, if the instance is terminated, the fractal
images will be lost along with the instance. Block based storage, which we'll discuss in :doc:`/section5`,
avoids that problem, but like local filesystems, it
requires administration to ensure that it does not fill up, and immediate attention if disks fail.
The Object Storage service manages many of these tasks that normally would require the application owner
to manage them, and presents a scalable and durable API that you can use for the fractals app, without
having to be concerned with the low-level details of how the objects are stored and replicated,
and growing the storage pool. In fact, Object Storage handles replication intrinsically, storing multiple
copies of each object and returning one of them on demand using the API.
First, let's learn how to connect to the Object Storage Endpoint:
.. only:: dotnet
.. warning:: This section has not yet been completed for the .NET SDK
.. only:: fog
.. warning:: This section has not yet been completed for the fog SDK
.. only:: jclouds
.. warning:: This section has not yet been completed for the jclouds SDK
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section4.py
:start-after: step-1
:end-before: step-2
.. warning::
Libcloud 0.16 and 0.17 are afflicted with a bug that means authentication to
a swift endpoint can fail with `a Python exception <https://issues.apache.org/jira/browse/LIBCLOUD-635>`_.
If you encounter this, you can upgrade your libcloud version, or apply a simple
`2-line patch <https://github.com/fifieldt/libcloud/commit/ec58868c3344a9bfe7a0166fc31c0548ed22ea87>`_.
.. note:: Libcloud uses a different connector for Object Storage to all other OpenStack services,
so a conn object from previous sections won't work here and we have to create a new one named :code:`swift`.
.. only:: node
.. warning:: This section has not yet been completed for the pkgcloud SDK
.. only:: openstacksdk
.. warning:: This section has not yet been completed for the OpenStack SDK
.. only:: phpopencloud
.. warning:: This section has not yet been completed for the PHP-OpenCloud SDK
To begin to store objects, we must first make a container.
Call yours :code:`fractals`:
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section4.py
:start-after: step-2
:end-before: step-3
You should see output such as:
.. code-block:: python
<Container: name=fractals, provider=OpenStack Swift>
You should now be able to see this container appear in a listing of
all containers in your account:
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section4.py
:start-after: step-3
:end-before: step-4
You should see output such as:
.. code-block:: python
[<Container: name=fractals, provider=OpenStack Swift>]
The next logical step is to upload an object. Find a photo of a goat
online, name it :code:`goat.jpg` and upload it to your container :code:`fractals`:
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section4.py
:start-after: step-4
:end-before: step-5
List objects in your container :code:`fractals` to see if the upload was successful, then download
the file to verify the md5sum is the same:
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section4.py
:start-after: step-5
:end-before: step-6
::
[<Object: name=an amazing goat, size=191874, hash=439884df9c1c15c59d2cf43008180048, provider=OpenStack Swift ...>]
.. literalinclude:: ../../samples/libcloud/section4.py
:start-after: step-6
:end-before: step-7
::
<Object: name=an amazing goat, size=954465, hash=7513986d3aeb22659079d1bf3dc2468b, provider=OpenStack Swift ...>
.. literalinclude:: ../../samples/libcloud/section4.py
:start-after: step-7
:end-before: step-8
::
7513986d3aeb22659079d1bf3dc2468b
Finally, let's clean up by deleting our test object:
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section4.py
:start-after: step-8
:end-before: step-9
.. note:: You need to pass in objects to the delete commands, not object names.
Now there should be no more objects available in the container :code:`fractals`.
.. literalinclude:: ../../samples/libcloud/section4.py
:start-after: step-9
:end-before: step-10
::
[]
Backup the Fractals from the database on the Object Storage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
So let's now use the knowledge from above to backup the images of the Fractals app, stored inside the database right now, on the Object Storage.
Use the :code:`fractals`' container from above to put the images in:
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section4.py
:start-after: step-10
:end-before: step-11
Next, we backup all of our existing fractals from the database to our swift container. A simple for loop takes care of that:
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section4.py
:start-after: step-11
:end-before: step-12
::
<Object: name=025fd8a0-6abe-4ffa-9686-bcbf853b71dc, size=61597, hash=b7a8a26e3c0ce9f80a1bf4f64792cd0c, provider=OpenStack Swift ...>
<Object: name=26ca9b38-25c8-4f1e-9e6a-a0132a7a2643, size=136298, hash=9f9b4cac16893854dd9e79dc682da0ff, provider=OpenStack Swift ...>
<Object: name=3f68c538-783e-42bc-8384-8396c8b0545d, size=27202, hash=e6ee0cd541578981c294cebc56bc4c35, provider=OpenStack Swift ...>
.. note:: Replace :code:`IP_API_1` with the IP address of the API instance.
.. note:: The example code uses the awesome `Requests library <http://docs.python-requests.org/en/latest/>`_. Ensure that it is installed on your system before trying to run the script above.
Configure the Fractals app to use Object Storage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. warning:: Currently it is not possible to directly store generated images in the OpenStack Object Storage. Please revisit this section again in the future.
Extra Features
--------------
Delete containers
~~~~~~~~~~~~~~~~~
One call we didn't cover above that you probably need to know is how to delete a container.
Ensure that you have removed all objects from the container before running this, otherwise
it will fail:
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section4.py
:start-after: step-12
:end-before: step-13
.. warning:: It is not possible to restore deleted objects. Be careful.
Add metadata to objects
~~~~~~~~~~~~~~~~~~~~~~~
You can also do advanced things like uploading an object with metadata, such
as in this below example, but for further information we'll refer you to the
documentation for your SDK. This option also uses a bit stream to upload the
file - iterating bit by bit over the file and passing those bits to swift as
they come, compared to loading the entire file in memory and then sending it.
This is more efficient, especially for larger files.
.. only:: libcloud
.. literalinclude:: ../../samples/libcloud/section4.py
:start-after: step-13
:end-before: step-14
.. todo:: It would be nice to have a pointer here to section 9.
Large objects
~~~~~~~~~~~~~
For efficiency, most Object Storage installations treat large objects (say, :code:`> 5GB`)
differently than smaller objects.
.. only:: libcloud
If you are working with large objects, use the :code:`ex_multipart_upload_object`
call instead of the simpler :code:`upload_object` call. How the upload works behind-the-scenes
is by splitting the large object into chunks, and creating a special manifest so
they can be recombined on download. Alter the :code:`chunk_size` parameter (in bytes) according to
what your cloud can accept.
.. literalinclude:: ../../samples/libcloud/section4.py
:start-after: step-14
:end-before: step-15
Next Steps
----------
You should now be fairly confident working with Object Storage.
You can find more about the Object Storage SDK calls at:
.. only:: libcloud
https://libcloud.readthedocs.org/en/latest/storage/api.html
Or try a different step in the tutorial, including:
* :doc:`/section5` - to migrate the database to block storage, or use the database-as-a-service component
* :doc:`/section6` - to automatically orchestrate the application
* :doc:`/section7` - to learn about more complex networking
* :doc:`/section8` - for advice for developers new to operations

View File

@ -0,0 +1,275 @@
===========================
Section Five: Block Storage
===========================
.. todo:: (For nick: Restructure the introduction to this chapter to provide context of what we're actually
going to do.)
By default, data in OpenStack instances is stored on 'ephemeral' disks. These stay with the instance throughout its lifetime, but when the
instance is terminated, that storage disappears -- along with all the data stored on it. Ephemeral storage is allocated to a
single instance and cannot be moved to another instance.
In this section, we will introduce block storage. Block storage (sometimes referred to as volume storage) provides you
with access to persistent storage devices. You interact with block storage by attaching volumes
to running instances, just as you might attach a USB drive to a physical server. Volumes can be detached from one instance and re-attached to another, and the data remains intact.
Block storage is implemented in OpenStack by the OpenStack Block Storage (cinder) project.
One component of the Fractal app that cannot be allowed to fail is the database server, which is used to keep track
of all of the data about fractals that have been created, including their storage location. So while you may have
configured the images to be stored in Object Storage in the previous section, without the database we lose track of
where in Object Storage they are, and the parameters that were used to create them.
Advanced users should consider how to remove the database from the architecture altogether and replace it
with metadata in the Object Storage (then contribute these steps to :doc:`section9`). Others should read
on to learn about how to work with block storage and move the Fractal app database server to use it.
Basics
------
Later on, we'll use a volume from the block storage service
to provide persistent storage for the Fractal app's database server,
but first - let's cover the basics, such as creating and attaching a block storage device.
.. only:: dotnet
.. warning:: This section has not yet been completed for the .NET SDK
.. only:: fog
.. warning:: This section has not yet been completed for the fog SDK
.. only:: jclouds
.. warning:: This section has not yet been completed for the jclouds SDK
.. only:: node
.. warning:: This section has not yet been completed for the pkgcloud SDK
.. only:: openstacksdk
.. warning:: This section has not yet been completed for the OpenStack SDK
.. only:: phpopencloud
.. warning:: This section has not yet been completed for the PHP-OpenCloud SDK
As always, connect to the API endpoint:
.. only:: libcloud
.. code-block:: python
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
auth_username = 'your_auth_username'
auth_password = 'your_auth_password'
auth_url = 'http://controller:5000'
project_name = 'your_project_name_or_id'
region_name = 'your_region_name'
provider = get_driver(Provider.OPENSTACK)
connection = provider(auth_username,
auth_password,
ex_force_auth_url=auth_url,
ex_force_auth_version='2.0_password',
ex_tenant_name=project_name,
ex_force_service_region=region_name)
To try it out, make a 1GB volume called :code:`test`.
.. only:: libcloud
.. code-block:: python
volume = connection.create_volume(1, 'test')
print(volume)
::
<StorageVolume id=755ab026-b5f2-4f53-b34a-6d082fb36689 size=1 driver=OpenStack>
.. note:: The parameter :code:`size` is in gigabytes.
List all volumes to see if it was successful:
.. only:: libcloud
.. code-block:: python
volumes = connection.list_volumes()
print(volumes)
::
[<StorageVolume id=755ab026-b5f2-4f53-b34a-6d082fb36689 size=1 driver=OpenStack>]
Now that you have created a storage volume, let's attach it to an already running instance.
Using Block Storage for the Fractal Database Server
---------------------------------------------------
Firstly, we're going to need a new server for our dedicated database.
Start a new instance called :code:`app-database` using the image, flavor
and keypair you have been using since :doc:`/section1`.
We will also need a new security group to allow access to the database server
(for mysql, port 3306) from the network:
.. only:: libcloud
.. code-block:: python
db_group = connection.ex_create_security_group('database', 'for database service')
connection.ex_create_security_group_rule(db_group, 'TCP', 3306, 3306)
instance = connection.create_node(name='app-database',
image=image,
size=flavor,
ex_keyname=keypair_name,
ex_security_groups=[db_group])
Using the unique identifier (UUID) for the volume, make a new volume object, then
use the server object from the previous snippet and attach the volume to it at :code:`/dev/vdb`:
.. only:: libcloud
.. code-block:: python
volume = connection.ex_get_volume('755ab026-b5f2-4f53-b34a-6d082fb36689')
connection.attach_volume(instance, volume, '/dev/vdb')
Log in to the server to be able to run the following steps.
.. note:: Replace :code:`IP_SERVICES` with the IP address of the services instance and USERNAME with the appropriate username.
Now prepare the empty block device.
::
$ ssh -i ~/.ssh/id_rsa USERNAME@IP_SERVICES
# fdisk -l
# mke2fs /dev/vdb
# mkdir /mnt/database
# mount /dev/vdb /mnt/database
.. todo:: Outputs missing, add attaching log from dmesg.
Stop the running MySQL database service and move the database files from :code:`/var/lib/mysql` onto the new volume (temporarily mounted at :code:`/mnt/database`).
::
# systemctl stop mariadb
# mv /var/lib/mysql/* /mnt/database
Sync the filesystems and mount the new block device now containing the database files to :code:`/var/lib/mysql`.
::
# sync
# umount /mnt/database
# rm -rf /mnt/database
# echo "/dev/vdb /var/lib/mysql ext4 defaults 1 2" >> /etc/fstab
# mount /var/lib/mysql
Finally start the previously stopped MySQL database service and check if everything is working like expected.
::
# systemctl start mariadb
# mysql -ufaafo -ppassword -h localhost faafo -e 'show tables;'
Extras
------
You can detach the volume and re-attach it elsewhere, or destroy the volume with the below steps.
.. warning::
The following operations are destructive and will result in data loss.
To detach and destroy a volume:
.. only:: libcloud
.. code-block:: python
connection.detach_volume(volume)
::
True
.. code-block:: python
connection.destroy_volume(volume)
.. note:: :code:`detach_volume` and :code:`destroy_volume` take a volume object, not a name.
There are also many other useful features, such as the ability to create snapshots of volumes (handy for backups):
.. only:: libcloud
.. code-block:: python
snapshot_name = 'test_backup_1'
connection.create_volume_snapshot('test', name='test backup 1')
.. todo:: Do we need a note here to mention that 'test' is the volume name and not the volume object?
You can find information about these calls and more in the `libcloud documentation <http://ci.apache.org/projects/libcloud/docs/compute/drivers/openstack.html>`_.
Working with the OpenStack Database service
-------------------------------------------
You created the database manually above, which is fine for a case with a single
database you won't touch often like this. However, OpenStack also has a component
code-named :code:`trove` that provides Database as a Service (DBaaS).
.. note:: This OpenStack Database service is not installed in many clouds right now, but if your cloud does
support it, it can make your life a lot easier when working with databases.
SDKs don't generally support the service yet, but you can use the 'trove' commandline client
to work with it instead.
Install the trove commandline client by following this guide:
http://docs.openstack.org/cli-reference/content/install_clients.html
Then set up the necessary variables for your cloud in an 'openrc' file using this guide:
http://docs.openstack.org/cli-reference/content/cli_openrc.html
Ensure you have an openrc.sh file, source it and then check your trove client works:
::
$ cat openrc.sh
export OS_USERNAME=your_auth_username
export OS_PASSWORD=your_auth_password
export OS_TENANT_NAME=your_project_name
export OS_AUTH_URL=http://controller:5000/v2.0
export OS_REGION_NAME=your_region_name
$ source openrc.sh
$ trove --version
1.0.9
From there, you can find a good resource on what is supported and how
to use it in `these slides <http://www.slideshare.net/hastexo/hands-on-trove-database-as-a-service-in-openstack-33588994>`_. Steps to work with an existing database
service installation start on slide 28.
Next Steps
----------
You should now be fairly confident working with Block Storage volumes.
There are several calls we did not cover. To see these and more,
refer to the volume documentation of your SDK, or try a different step in the tutorial, including:
* :doc:`/section6` - to automatically orchestrate the application
* :doc:`/section7` - to learn about more complex networking
* :doc:`/section8` - for advice for developers new to operations

View File

@ -0,0 +1,199 @@
==========================
Section Six: Orchestration
==========================
.. todo:: Needs to be restructured so that the fractals app is used as the example for the explanatory material.
.. note:: Sorry! We're not quite happy with this chapter. It will give you an introduction to heat,
but it's a little dry at the moment. We'd like to write a template for the Fractals app instead
of using the "hello world" style ones, so stay tuned!
Throughout this guide, we've talked about the importance of durability and scalability
for your cloud-based applications. In most cases, really achieving these qualities means
automating tasks such as scaling and other operational tasks.
The Orchestration module provides a template-based way to describe a cloud
application, then coordinates running the needed OpenStack API calls to run
cloud applications. The templates allow you to create most OpenStack resource
types, such as instances, networking information, volumes, security groups
and even users. It also provides more advanced functionality, such as
instance high availability, instance auto-scaling, and nested stacks.
The OpenStack Orchestration API contains the following constructs:
* Stacks
* Resources
* Templates
Stacks are created from Templates, which contain Resources. Resources
are an abstraction in the HOT (Heat Orchestration Template) template language, which enables you to define different
cloud resources by setting the `type` attribute.
For example, you might use the Orchestration API to create two compute
instances by creating a Stack and by passing a Template to the Orchestration API.
That Template would contain two Resources with the `type` attribute set to `OS::Nova::Server`.
That's a simplistic example, of course, but the flexibility of the Resource object
enables the creation of Templates that contain all the required cloud
infrastructure to run an application, such as load balancers, block storage volumes,
compute instances, networking topology, and security policies.
.. note:: The Orchestration module isn't deployed by default in every cloud. If these commands don't work, it means the Orchestration API isn't available; ask your support team for assistance.
This section introduces the `HOT templating language <http://docs.openstack.org/developer/heat/template_guide/hot_guide.html>`_,
and takes you through some of the common calls you will make when working with OpenStack Orchestration.
Unlike previous sections of this guide, in which you used your SDK to programmatically interact with
OpenStack, in this section you'll be using the Orchestration API directly through Template files,
so we'll work from the command line.
Install the 'heat' commandline client by following this guide:
http://docs.openstack.org/cli-reference/content/install_clients.html
then set up the necessary variables for your cloud in an 'openrc' file using this guide:
http://docs.openstack.org/cli-reference/content/cli_openrc.html
.. only:: dotnet
.. warning:: the .NET SDK does not currently support OpenStack Orchestration
.. only:: fog
.. note:: fog `does support OpenStack Orchestration <https://github.com/fog/fog/tree/master/lib/fog/openstack/models/orchestration>`_.
.. only:: jclouds
.. warning:: Jclouds does not currently support OpenStack Orchestration. See this `bug report <https://issues.apache.org/jira/browse/JCLOUDS-693>`_.
.. only:: libcloud
.. warning:: libcloud does not currently support OpenStack Orchestration.
.. only:: node
.. note:: Pkgcloud supports OpenStack Orchestration :D:D:D but this section is `not written yet <https://github.com/pkgcloud/pkgcloud/blob/master/docs/providers/openstack/orchestration.md>`_
.. only:: openstacksdk
.. warning:: OpenStack SDK does not currently support OpenStack Orchestration.
.. only:: phpopencloud
.. note:: PHP-opencloud supports orchestration :D:D:D but this section is not written yet.
HOT Templating Language
-----------------------
The best place to learn about the template syntax for OpenStack Orchestration is the
`Heat Orchestration Template (HOT) Guide <http://docs.openstack.org/developer/heat/template_guide/hot_guide.html>`_
You should read the HOT Guide first to learn how to create basic templates, their inputs and outputs.
Working with Stacks: Basics
---------------------------
.. todo::
This section needs to have a HOT template written for deploying the Fractal Application
.. todo::
Replace the hello_world.yaml template with the Fractal template
* Stack create
In the following example, we use the `hello_world <https://github.com/openstack/heat-templates/blob/master/hot/hello_world.yaml>`_ HOT template to demonstrate creating
a Nova compute instance, with a few configuration settings passed in, such as an administrative password and the unique identifier (UUID)
of an image:
::
$ wget https://raw.githubusercontent.com/openstack/heat-templates/master/hot/hello_world.yaml
$ heat stack-create --template-file hello_world.yaml \
--parameters admin_pass=Test123\;key_name=test\;image=5bbe4073-90c0-4ec9-833c-092459cc4539 hello_world
+--------------------------------------+-------------+--------------------+----------------------+
| id | stack_name | stack_status | creation_time |
+--------------------------------------+-------------+--------------------+----------------------+
| 0db2c026-fb9a-4849-b51d-b1df244096cd | hello_world | CREATE_IN_PROGRESS | 2015-04-01T03:20:25Z |
+--------------------------------------+-------------+--------------------+----------------------+
The resulting stack creates a Nova instance automatically, which you can see here:
::
$ nova list
+--------------------------------------+---------------------------------+--------+------------+-------------+------------------+
| ID | Name | Status | Task State | Power State | Networks |
+--------------------------------------+---------------------------------+--------+------------+-------------+------------------+
| 9bdf0e2f-415e-43a0-90ea-63a5faf86cf9 | hello_world-server-dwmwhzfxgoor | ACTIVE | - | Running | private=10.0.0.3 |
+--------------------------------------+---------------------------------+--------+------------+-------------+------------------+
Verify that the stack was successfully created using the following command:
::
$ heat stack-list
+--------------------------------------+-------------+-----------------+----------------------+
| id | stack_name | stack_status | creation_time |
+--------------------------------------+-------------+-----------------+----------------------+
| 0db2c026-fb9a-4849-b51d-b1df244096cd | hello_world | CREATE_COMPLETE | 2015-04-01T03:20:25Z |
+--------------------------------------+-------------+-----------------+----------------------+
Remove the stack:
::
$ heat stack-delete hello_world
+--------------------------------------+-------------+--------------------+----------------------+
| id | stack_name | stack_status | creation_time |
+--------------------------------------+-------------+--------------------+----------------------+
| 0db2c026-fb9a-4849-b51d-b1df244096cd | hello_world | DELETE_IN_PROGRESS | 2015-04-01T03:20:25Z |
+--------------------------------------+-------------+--------------------+----------------------+
Verify that the removal of the stack has deleted the nova instance:
::
$ nova list
+----+------+--------+------------+-------------+----------+
| ID | Name | Status | Task State | Power State | Networks |
+----+------+--------+------------+-------------+----------+
+----+------+--------+------------+-------------+----------+
While this stack is not very interesting - it just starts a single instance - it
is possible to make very complicated templates that involve dozens of instances
or adds and removes instances based on demand. Continue to the next section to
learn more.
Working with Stacks: Advanced
-----------------------------
.. todo:: needs more explanatory material
.. todo:: needs a heat template that uses fractal app
With the use of the Orchestration API, the Fractal app can create an autoscaling
group for all parts of the application, in order to dynamically provision more
compute resources during periods of heavy utilization, and also terminate compute
instances to scale down, as demand decreases.
There are two helpful articles available to learn about autoscaling with the
Orchestration API:
* http://superuser.openstack.org/articles/simple-auto-scaling-environment-with-heat
* http://superuser.openstack.org/articles/understanding-openstack-heat-auto-scaling
An example template that creates an auto-scaling wordpress instance can be found in
`the heat template repository <https://github.com/openstack/heat-templates/blob/master/hot/autoscaling.yaml>`_
Next Steps
----------
You should now be fairly confident working with the Orchestration service.
There are several calls we did not cover. To see these and more,
refer to the volume documentation of your SDK, or try a different step in the tutorial, including:
* :doc:`/section7` - to learn about more complex networking
* :doc:`/section8` - for advice for developers new to operations
* :doc:`/section9` - to see all the crazy things we think ordinary folks won't want to do ;)

View File

@ -0,0 +1,797 @@
=========================
Section Seven: Networking
=========================
.. todo:: Latter part of the chapter (LBaaS) needs to use Fractals app entities for the examples.
Prior to this chapter, all of the nodes that comprise the fractal application
were attached to the same network.
In this section of the tutorial, we introduce the Networking API,
which will enable us to build networking topologies that separate
public traffic accessing the application from traffic between the API
instances and the worker components, introduce load balancing for
resilience, and create a secure backend network for communication between the
database, webserver, file storage, and worker components.
.. only:: dotnet
.. warning:: This section has not yet been completed for the .NET SDK
.. only:: fog
.. warning:: fog `supports <http://www.rubydoc.info/gems/fog/1.8.0/Fog/Network/OpenStack>`_ the OpenStack Networking API, but this section has not yet been completed.
.. only:: jclouds
.. warning:: jClouds supports the OpenStack Networking API, but section has not yet been completed. Please see `this <https://gist.github.com/everett-toews/8701756>`_ in the meantime.
.. only:: libcloud
.. warning:: Libcloud does not support the OpenStack Networking API
.. only:: node
.. warning:: Pkgcloud supports the OpenStack Networking API, but this section has not been completed
.. only:: openstacksdk
.. warning:: This section has not yet been completed for the OpenStack SDK
.. only:: phpopencloud
.. warning:: PHP-OpenCloud supports the OpenStack Networking API, but this section has not been completed
Working with the CLI
--------------------
As SDKs don't currently support the OpenStack Networking API this section uses
the commandline.
Install the 'neutron' commandline client by following this guide:
http://docs.openstack.org/cli-reference/content/install_clients.html
Then set up the necessary variables for your cloud in an 'openrc' file using this guide:
http://docs.openstack.org/cli-reference/content/cli_openrc.html
Ensure you have an openrc.sh file, source it and then check your neutron client works:
::
$ cat openrc.sh
export OS_USERNAME=your_auth_username
export OS_PASSWORD=your_auth_password
export OS_TENANT_NAME=your_project_name
export OS_AUTH_URL=http://controller:5000/v2.0
export OS_REGION_NAME=your_region_name
$ source openrc.sh
$ neutron --version
2.3.11
Networking Segmentation
-----------------------
In traditional datacenters, multiple network segments are
dedicated to specific types of network traffic.
The fractal application we are building contains three types of network traffic:
* public-facing web traffic
* API traffic
* internal worker traffic
For performance reasons, it makes sense to have a network for each tier,
so that traffic from one tier does not "crowd out" other types of traffic
and cause the application to fail. In addition, having separate networks makes
controlling access to parts of the application easier to manage, improving the overall
security of the application.
Prior to this section, the network layout for the Fractal application would be similar to the following diagram:
.. nwdiag::
nwdiag {
network public {
address = "203.0.113.0/24"
tenant_router [ address = "203.0.113.20" ];
}
network tenant_network {
address = "10.0.0.0/24"
tenant_router [ address = "10.0.0.1" ];
api [ address = "203.0.113.20, 10.0.0.3" ];
webserver1 [ address = "203.0.113.21, 10.0.0.4" ];
webserver2 [ address = "203.0.113.22, 10.0.0.5" ];
worker1 [ address = "203.0.113.23, 10.0.0.6" ];
worker2 [ address = "203.0.113.24, 10.0.0.7" ];
}
}
In this network layout, we are assuming that the OpenStack cloud in which
you have been building your application has a public
network and tenant router that was already created in advance, either by the
administrators of the cloud you are running the Fractal application on,
or by you, following the instructions in the appendix.
Many of the network concepts that are discussed in this section are
already present in the diagram above. A tenant router provides
routing and external access for the worker nodes, and floating IP addresses
are already associated with each node in the Fractal application cluster
to facilitate external access.
At the end of this section, we will be making some slight changes to the networking topology
by using the OpenStack Networking API to create a new network to which the worker nodes will attach
(10.0.1.0/24). We will use the API network (10.0.3.0/24) to attach the Fractal API servers. Webserver instances have their own network (10.0.2.0/24), and
will be accessible by fractal aficionados worldwide, by allocating floating IPs from the public network.
.. nwdiag::
nwdiag {
network public {
address = "203.0.113.0/24"
tenant_router [ address = "203.0.113.60"];
}
network webserver_network{
address = "10.0.2.0/24"
tenant_router [ address = "10.0.2.1"];
webserver1 [ address = "203.0.113.21, 10.0.2.3"];
webserver2 [ address = "203.0.113.22, 10.0.2.4"];
}
network api_network {
address = "10.0.3.0/24"
tenant_router [ address = "10.0.3.1" ];
api1 [ address = "10.0.3.3" ];
api2 [ address = "10.0.3.4" ];
}
network worker_network {
address = "10.0.1.0/24"
tenant_router [ address = "10.0.1.1" ];
worker1 [ address = "10.0.1.5" ];
worker2 [ address = "10.0.1.6" ];
}
}
Introduction to Tenant Networking
---------------------------------
With the OpenStack Networking API, the workflow for creating a network topology that separates the public-facing
Fractals app API from the worker backend is as follows:
* Create a network for the web server nodes.
* Create a network for the worker nodes. This is the private data network.
* Create a subnet for the private data network to use for addressing. In other words, when worker instances are created, their IP addresses will come from this subnet.
* Create a subnet for the web server network to use for addressing. In other words, when web server instances are created, their IP addresses will come from this subnet.
* Create a router for the private data network.
* Allocate floating ips and assign them to the web server nodes.
Creating Networks
-----------------
We assume that the public network, with the subnet that floating IPs can be allocated from, was provisioned
for you by your cloud operator. This is due to the nature of L3 routing, where the IP address range that
is used for floating IPs is configured in other parts of the operator's network, so that traffic is properly routed.
.. todo:: Rework the console outputs in these sections to be more comprehensive, based on the outline above
Next, create a private data network, worker_network:
::
$ neutron net-create worker_network
Created a new network:
+-----------------+--------------------------------------+
| Field | Value |
+-----------------+--------------------------------------+
| admin_state_up | True |
| id | 953224c6-c510-45c5-8a29-37deffd3d78e |
| name | worker_network |
| router:external | False |
| shared | False |
| status | ACTIVE |
| subnets | |
| tenant_id | f77bf3369741408e89d8f6fe090d29d2 |
+-----------------+--------------------------------------+
Now let's just confirm that we have both the worker network, and a public
network by getting a list of all networks in the cloud. The public network
doesn't have to be named public - it could be 'external', 'net04_ext' or
something else - the important thing is it exists and can be used to reach
the internet.
::
$ neutron net-list
+--------------------------------------+------------------+--------------------------------------------------+
| id | name | subnets |
+--------------------------------------+------------------+--------------------------------------------------+
| 29349515-98c1-4f59-922e-3809d1b9707c | public | 7203dd35-7d17-4f37-81a1-9554b3316ddb |
| 953224c6-c510-45c5-8a29-37deffd3d78e | worker_network | |
+--------------------------------------+------------------+--------------------------------------------------+
Next create the subnet from which addresses will be allocated for instances on the worker network:
::
$ neutron subnet-create --name worker_cidr worker_network 10.0.1.0/24
Created a new subnet:
+-------------------+--------------------------------------------+
| Field | Value |
+-------------------+--------------------------------------------+
| allocation_pools | {"start": "10.0.1.2", "end": "10.0.1.254"} |
| cidr | 10.0.1.0/24 |
| dns_nameservers | |
| enable_dhcp | True |
| gateway_ip | 10.0.1.1 |
| host_routes | |
| id | a0e2ebe4-5d4e-46b3-82b5-4179d778e615 |
| ip_version | 4 |
| ipv6_address_mode | |
| ipv6_ra_mode | |
| name | worker_cidr |
| network_id | 953224c6-c510-45c5-8a29-37deffd3d78e |
| tenant_id | f77bf3369741408e89d8f6fe090d29d2 |
+-------------------+--------------------------------------------+
Now create a network for the webservers ...
::
$ neutron net-create webserver_network
Created a new network:
+-----------------+--------------------------------------+
| Field | Value |
+-----------------+--------------------------------------+
| admin_state_up | True |
| id | 28cf9704-2b43-4925-b23e-22a892e354f2 |
| mtu | 0 |
| name | webserver_network |
| router:external | False |
| shared | False |
| status | ACTIVE |
| subnets | |
| tenant_id | 0cb06b70ef67424b8add447415449722 |
+-----------------+--------------------------------------+
... and a subnet from which they can pull IP addresses:
::
$ neutron subnet-create webserver_network 10.0.2.0/24
Created a new subnet:
+-------------------+--------------------------------------------+
| Field | Value |
+-------------------+--------------------------------------------+
| allocation_pools | {"start": "10.0.2.2", "end": "10.0.2.254"} |
| cidr | 10.0.2.0/24 |
| dns_nameservers | |
| enable_dhcp | True |
| gateway_ip | 10.0.2.1 |
| host_routes | |
| id | 1e0d6a75-c40e-4be5-8e13-b2226fc8444a |
| ip_version | 4 |
| ipv6_address_mode | |
| ipv6_ra_mode | |
| name | |
| network_id | 28cf9704-2b43-4925-b23e-22a892e354f2 |
| tenant_id | 0cb06b70ef67424b8add447415449722 |
+-------------------+--------------------------------------------+
Next, create the network for the API servers:
::
$ neutron net-create api_network
Created a new network:
+-----------------+--------------------------------------+
| Field | Value |
+-----------------+--------------------------------------+
| admin_state_up | True |
| id | 5fe4045a-65dc-4672-b44e-1f14a496a71a |
| mtu | 0 |
| name | api_network |
| router:external | False |
| shared | False |
| status | ACTIVE |
| subnets | |
| tenant_id | 0cb06b70ef67424b8add447415449722 |
+-----------------+--------------------------------------+
Finally, create the subnet for the API network:
::
$ neutron subnet-create api_network 10.0.3.0/24
Created a new subnet:
+-------------------+--------------------------------------------+
| Field | Value |
+-------------------+--------------------------------------------+
| allocation_pools | {"start": "10.0.3.2", "end": "10.0.3.254"} |
| cidr | 10.0.3.0/24 |
| dns_nameservers | |
| enable_dhcp | True |
| gateway_ip | 10.0.3.1 |
| host_routes | |
| id | 6ce4b60d-a940-4369-b8f0-2e9c196e4f20 |
| ip_version | 4 |
| ipv6_address_mode | |
| ipv6_ra_mode | |
| name | |
| network_id | 5fe4045a-65dc-4672-b44e-1f14a496a71a |
| tenant_id | 0cb06b70ef67424b8add447415449722 |
+-------------------+--------------------------------------------+
Now that you've got the networks created, go ahead and create two Floating IPs, for web servers.
Ensure that you replace 'public' with the name of the public/external network set up
by your cloud administrator.
::
$ neutron floatingip-create public
Created a new floatingip:
+---------------------+--------------------------------------+
| Field | Value |
+---------------------+--------------------------------------+
| fixed_ip_address | |
| floating_ip_address | 203.0.113.21 |
| floating_network_id | 7ad1ce2b-4b8c-4036-a77b-90332d7f4dbe |
| id | 185df49f-7890-4c59-a66a-2456b6a87422 |
| port_id | |
| router_id | |
| status | DOWN |
| tenant_id | 0cb06b70ef67424b8add447415449722 |
+---------------------+--------------------------------------+
$ neutron floatingip-create public
Created a new floatingip:
+---------------------+--------------------------------------+
| Field | Value |
+---------------------+--------------------------------------+
| fixed_ip_address | |
| floating_ip_address | 203.0.113.22 |
| floating_network_id | 7ad1ce2b-4b8c-4036-a77b-90332d7f4dbe |
| id | 185df49f-7890-4c59-a66a-2456b6a87422 |
| port_id | |
| router_id | |
| status | DOWN |
| tenant_id | 0cb06b70ef67424b8add447415449722 |
+---------------------+--------------------------------------+
.. note:: The world is running out of IPv4 addresses. If you get an error like
"No more IP addresses available on network", contact your cloud
administrator. You may also want to ask about IPv6 :)
Next we'll need to enable OpenStack to route traffic appropriately.
Creating the SNAT gateway
-------------------------
Because we are using cloud-init and other tools to deploy and bootstrap the application,
the Fractal app worker instances require Source Network Address Translation (SNAT).
If the Fractal app worker nodes were deployed from a "golden image"
that had all the software components already installed, there would be no need to create a
Neutron router to provide SNAT functionality.
.. todo :: nickchase doesn't understand the above paragraph. Why wouldn't it be required?
::
$ neutron router-create tenant_router
Created a new router:
+-----------------------+--------------------------------------+
| Field | Value |
+-----------------------+--------------------------------------+
| admin_state_up | True |
| external_gateway_info | |
| id | d380b29f-ca65-4718-9735-196cbed10fce |
| name | tenant_router |
| routes | |
| status | ACTIVE |
| tenant_id | f77bf3369741408e89d8f6fe090d29d2 |
+-----------------------+--------------------------------------+
After creating the router, you need to set up the gateway for the router. For outbound access
we will set the router's gateway as the public network.
::
$ neutron router-gateway-set tenant_router public
Set gateway for router tenant_router
$ neutron router-show tenant_router
+-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| admin_state_up | True |
| external_gateway_info | {"network_id": "29349515-98c1-4f59-922e-3809d1b9707c", "enable_snat": true, "external_fixed_ips": [{"subnet_id": "7203dd35-7d17-4f37-81a1-9554b3316ddb", "ip_address": "203.0.113.50"}]} |
| id | d380b29f-ca65-4718-9735-196cbed10fce |
| name | tenant_router |
| routes | |
| status | ACTIVE |
| tenant_id | f77bf3369741408e89d8f6fe090d29d2 |
+-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
The final, most important step is to create an interface on the worker network and attach it to the router you just created.
::
$ neutron router-interface-add tenant_router worker_cidr
Added interface 0d8bd523-06c2-4ddd-8b33-8726af2daa0a to router tenant_router.
::
$ neutron net-list
+--------------------------------------+----------------+--------------------------------------------------+
| id | name | subnets |
+--------------------------------------+----------------+--------------------------------------------------+
| 29349515-98c1-4f59-922e-3809d1b9707c | public | 7203dd35-7d17-4f37-81a1-9554b3316ddb |
| 953224c6-c510-45c5-8a29-37deffd3d78e | worker_network | a0e2ebe4-5d4e-46b3-82b5-4179d778e615 10.0.1.0/24 |
+--------------------------------------+----------------+--------------------------------------------------+
.. todo:
Wire up the tenant router to the api_network and webserver_network
Booting a worker
~~~~~~~~~~~~~~~~
Now that you've prepared the networking infrastructure, you can go ahead and boot an instance on it.
Ensure you use appropriate flavor and image values for your cloud - see :doc:`section1` if you've not already.
.. todo:: Show how to create an instance in libcloud using the network we just created. - libcloud does not yet support this.
::
$ nova boot --flavor m1.tiny --image cirros-0.3.3-x86_64-disk --nic net-id=953224c6-c510-45c5-8a29-37deffd3d78e worker1
+--------------------------------------+-----------------------------------------------------------------+
| Property | Value |
+--------------------------------------+-----------------------------------------------------------------+
| OS-DCF:diskConfig | MANUAL |
| OS-EXT-AZ:availability_zone | nova |
| OS-EXT-STS:power_state | 0 |
| OS-EXT-STS:task_state | scheduling |
| OS-EXT-STS:vm_state | building |
| OS-SRV-USG:launched_at | - |
| OS-SRV-USG:terminated_at | - |
| accessIPv4 | |
| accessIPv6 | |
| adminPass | 9vU8KSY4oDht |
| config_drive | |
| created | 2015-03-30T05:26:04Z |
| flavor | m1.tiny (1) |
| hostId | |
| id | 9e188a47-a246-463e-b445-027d6e2966e0 |
| image | cirros-0.3.3-x86_64-disk (ad605ff9-4593-4048-900b-846d6401c193) |
| key_name | - |
| metadata | {} |
| name | worker1 |
| os-extended-volumes:volumes_attached | [] |
| progress | 0 |
| security_groups | default |
| status | BUILD |
| tenant_id | f77bf3369741408e89d8f6fe090d29d2 |
| updated | 2015-03-30T05:26:04Z |
| user_id | a61292a5691d4c6c831b7a8f07921261 |
+--------------------------------------+-----------------------------------------------------------------+
Load Balancing
--------------
After separating the Fractal worker nodes into their own network,
the next logical step is to move the Fractal API service onto a load balancer,
so that multiple API workers can handle requests. By using a load balancer, the API
service can be scaled out in a similar fashion to the worker nodes.
Neutron LbaaS API
~~~~~~~~~~~~~~~~~
.. note:: This section is based on the Neutron LBaaS API version 1.0 http://docs.openstack.org/admin-guide-cloud/content/lbaas_workflow.html
.. todo:: libcloud support added 0.14: https://developer.rackspace.com/blog/libcloud-0-dot-14-released/ - this section needs rewriting to use the libcloud API
The OpenStack Networking API provides support for creating loadbalancers, which can be used to
scale the Fractal app web service. In the following example, we create two compute instances via the Compute
API, then instantiate a loadbalancer that will use a virtual IP (VIP) for accessing the web service offered by
the two compute nodes. The end result will be the following network topology:
.. nwdiag::
nwdiag {
network public {
address = "203.0.113.0/24"
tenant_router [ address = "203.0.113.60" ];
loadbalancer [ address = "203.0.113.63" ];
}
network webserver_network {
address = "10.0.2.0/24"
tenant_router [ address = "10.0.2.1"];
webserver1 [ address = "203.0.113.21, 10.0.2.3"];
webserver2 [ address = "203.0.113.22, 10.0.2.4"];
}
}
libcloud support added 0.14: https://developer.rackspace.com/blog/libcloud-0-dot-14-released/
Let's start by looking at what's already in place.
::
$ neutron net-list
+--------------------------------------+-------------------+-----------------------------------------------------+
| id | name | subnets |
+--------------------------------------+-------------------+-----------------------------------------------------+
| 3c826379-e896-45a9-bfe1-8d84e68e9c63 | webserver_network | 3eada497-36dd-485b-9ba4-90c5e3340a53 10.0.2.0/24 |
| 7ad1ce2b-4b8c-4036-a77b-90332d7f4dbe | public | 47fd3ff1-ead6-4d23-9ce6-2e66a3dae425 203.0.113.0/24 |
+--------------------------------------+-------------------+-----------------------------------------------------+
Now let's go ahead and create 2 instances.
::
$ nova boot --flavor 1 --image 53ff0943-99ba-42d2-a10d-f66656372f87 --min-count 2 test
+--------------------------------------+-----------------------------------------------------------------+
| Property | Value |
+--------------------------------------+-----------------------------------------------------------------+
| OS-DCF:diskConfig | MANUAL |
| OS-EXT-AZ:availability_zone | nova |
| OS-EXT-STS:power_state | 0 |
| OS-EXT-STS:task_state | scheduling |
| OS-EXT-STS:vm_state | building |
| OS-SRV-USG:launched_at | - |
| OS-SRV-USG:terminated_at | - |
| accessIPv4 | |
| accessIPv6 | |
| adminPass | z84zWFCcpppH |
| config_drive | |
| created | 2015-04-02T02:45:09Z |
| flavor | m1.tiny (1) |
| hostId | |
| id | 8d579f4a-116d-46b9-8db3-aa55b76f76d8 |
| image | cirros-0.3.3-x86_64-disk (53ff0943-99ba-42d2-a10d-f66656372f87) |
| key_name | - |
| metadata | {} |
| name | test-1 |
| os-extended-volumes:volumes_attached | [] |
| progress | 0 |
| security_groups | default |
| status | BUILD |
| tenant_id | 0cb06b70ef67424b8add447415449722 |
| updated | 2015-04-02T02:45:09Z |
| user_id | d95381d331034e049727e2413efde39f |
+--------------------------------------+-----------------------------------------------------------------+
Confirm that they were added:
::
$ nova list
+--------------------------------------+--------+--------+------------+-------------+------------------+
| ID | Name | Status | Task State | Power State | Networks |
+--------------------------------------+--------+--------+------------+-------------+------------------+
| 8d579f4a-116d-46b9-8db3-aa55b76f76d8 | test-1 | ACTIVE | - | Running | private=10.0.2.4 |
| 8fadf892-b6e9-44f4-b132-47c6762ffa2c | test-2 | ACTIVE | - | Running | private=10.0.2.3 |
+--------------------------------------+--------+--------+------------+-------------+------------------+
Now let's look at what ports are available:
::
$ neutron port-list
+--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+
| id | name | mac_address | fixed_ips |
+--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+
| 1d9a0f79-bf05-443e-b65d-a05b0c635936 | | fa:16:3e:10:f8:f0 | {"subnet_id": "3eada497-36dd-485b-9ba4-90c5e3340a53", "ip_address": "10.0.2.2"} |
| 3f40c866-169b-48ec-8e0a-d9f1e70e5756 | | fa:16:3e:8c:6f:25 | {"subnet_id": "3eada497-36dd-485b-9ba4-90c5e3340a53", "ip_address": "10.0.2.1"} |
| 462c92c6-941c-48ab-8cca-3c7a7308f580 | | fa:16:3e:d7:7d:56 | {"subnet_id": "3eada497-36dd-485b-9ba4-90c5e3340a53", "ip_address": "10.0.2.4"} |
| 7451d01f-bc3b-46a6-9ae3-af260d678a63 | | fa:16:3e:c6:d4:9c | {"subnet_id": "3eada497-36dd-485b-9ba4-90c5e3340a53", "ip_address": "10.0.2.3"} |
+--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+
Next create additional floating IPs by specifying the fixed IP addresses they should point to and the ports they should use:
::
$ neutron floatingip-create public --fixed-ip-address 10.0.2.3 --port-id 7451d01f-bc3b-46a6-9ae3-af260d678a63
Created a new floatingip:
+---------------------+--------------------------------------+
| Field | Value |
+---------------------+--------------------------------------+
| fixed_ip_address | 10.0.2.3 |
| floating_ip_address | 203.0.113.21 |
| floating_network_id | 7ad1ce2b-4b8c-4036-a77b-90332d7f4dbe |
| id | dd2c838e-7c1b-480c-a18c-17f1526c96ea |
| port_id | 7451d01f-bc3b-46a6-9ae3-af260d678a63 |
| router_id | 7f8ee1f6-7211-40e8-b9a8-17582ecfe50b |
| status | DOWN |
| tenant_id | 0cb06b70ef67424b8add447415449722 |
+---------------------+--------------------------------------+
$ neutron floatingip-create public --fixed-ip-address 10.0.2.4 --port-id 462c92c6-941c-48ab-8cca-3c7a7308f580
Created a new floatingip:
+---------------------+--------------------------------------+
| Field | Value |
+---------------------+--------------------------------------+
| fixed_ip_address | 10.0.2.4 |
| floating_ip_address | 203.0.113.22 |
| floating_network_id | 7ad1ce2b-4b8c-4036-a77b-90332d7f4dbe |
| id | 6eb510bf-c18f-4c6f-bb35-e21938ca8bd4 |
| port_id | 462c92c6-941c-48ab-8cca-3c7a7308f580 |
| router_id | 7f8ee1f6-7211-40e8-b9a8-17582ecfe50b |
| status | DOWN |
| tenant_id | 0cb06b70ef67424b8add447415449722 |
+---------------------+--------------------------------------+
All right, now you're ready to go ahead and create members for the load balancer pool, referencing the floating IPs:
::
$ neutron lb-member-create --address 203.0.113.21 --protocol-port 80 mypool
Created a new member:
+--------------------+--------------------------------------+
| Field | Value |
+--------------------+--------------------------------------+
| address | 203.0.113.21 |
| admin_state_up | True |
| id | 679966a9-f719-4df0-86cf-3a24d0433b38 |
| pool_id | 600496f0-196c-431c-ae35-a0af9bb01d32 |
| protocol_port | 80 |
| status | PENDING_CREATE |
| status_description | |
| tenant_id | 0cb06b70ef67424b8add447415449722 |
| weight | 1 |
+--------------------+--------------------------------------+
$ neutron lb-member-create --address 203.0.113.22 --protocol-port 80 mypool
Created a new member:
+--------------------+--------------------------------------+
| Field | Value |
+--------------------+--------------------------------------+
| address | 203.0.113.22 |
| admin_state_up | True |
| id | f3ba0605-4926-4498-b86d-51002892e93a |
| pool_id | 600496f0-196c-431c-ae35-a0af9bb01d32 |
| protocol_port | 80 |
| status | PENDING_CREATE |
| status_description | |
| tenant_id | 0cb06b70ef67424b8add447415449722 |
| weight | 1 |
+--------------------+--------------------------------------+
You should be able to see them in the member list:
::
$ neutron lb-member-list
+--------------------------------------+--------------+---------------+--------+----------------+--------+
| id | address | protocol_port | weight | admin_state_up | status |
+--------------------------------------+--------------+---------------+--------+----------------+--------+
| 679966a9-f719-4df0-86cf-3a24d0433b38 | 203.0.113.21 | 80 | 1 | True | ACTIVE |
| f3ba0605-4926-4498-b86d-51002892e93a | 203.0.113.22 | 80 | 1 | True | ACTIVE |
+--------------------------------------+--------------+---------------+--------+----------------+--------+
Now let's create a healthmonitor that will ensure that members of the loadbalancer pool are active and able
to respond to requests. If a member in the pool dies or is unresponsive, the member is removed from the pool
so that client requests are routed to another active member.
::
$ neutron lb-healthmonitor-create --delay 3 --type HTTP --max-retries 3 --timeout 3
Created a new health_monitor:
+----------------+--------------------------------------+
| Field | Value |
+----------------+--------------------------------------+
| admin_state_up | True |
| delay | 3 |
| expected_codes | 200 |
| http_method | GET |
| id | 663345e6-2853-43b2-9ccb-a623d5912345 |
| max_retries | 3 |
| pools | |
| tenant_id | 0cb06b70ef67424b8add447415449722 |
| timeout | 3 |
| type | HTTP |
| url_path | / |
+----------------+--------------------------------------+
$ neutron lb-healthmonitor-associate 663345e6-2853-43b2-9ccb-a623d5912345 mypool
Associated health monitor 663345e6-2853-43b2-9ccb-a623d5912345
Now create a virtual IP that will be used to direct traffic between the various members of the pool:
::
$ neutron lb-vip-create --name myvip --protocol-port 80 --protocol HTTP --subnet-id 47fd3ff1-ead6-4d23-9ce6-2e66a3dae425 mypool
Created a new vip:
+---------------------+--------------------------------------+
| Field | Value |
+---------------------+--------------------------------------+
| address | 203.0.113.63 |
| admin_state_up | True |
| connection_limit | -1 |
| description | |
| id | f0bcb66e-5eeb-447b-985e-faeb67540c2f |
| name | myvip |
| pool_id | 600496f0-196c-431c-ae35-a0af9bb01d32 |
| port_id | bc732f81-2640-4622-b624-993a5ae185c5 |
| protocol | HTTP |
| protocol_port | 80 |
| session_persistence | |
| status | PENDING_CREATE |
| status_description | |
| subnet_id | 47fd3ff1-ead6-4d23-9ce6-2e66a3dae425 |
| tenant_id | 0cb06b70ef67424b8add447415449722 |
+---------------------+--------------------------------------+
And confirm it's in place:
::
$ neutron lb-vip-list
+--------------------------------------+-------+--------------+----------+----------------+--------+
| id | name | address | protocol | admin_state_up | status |
+--------------------------------------+-------+--------------+----------+----------------+--------+
| f0bcb66e-5eeb-447b-985e-faeb67540c2f | myvip | 203.0.113.63 | HTTP | True | ACTIVE |
+--------------------------------------+-------+--------------+----------+----------------+--------+
Now let's look at the big picture.
Final Result
------------
With the addition of the loadbalancer, the Fractal app's networking topology now reflects the modular
nature of the application itself.
.. nwdiag::
nwdiag {
network public {
address = "203.0.113.0/24"
tenant_router [ address = "203.0.113.60"];
loadbalancer [ address = "203.0.113.63" ];
}
network webserver_network{
address = "10.0.2.0/24"
tenant_router [ address = "10.0.2.1"];
webserver1 [ address = "203.0.113.21, 10.0.2.3"];
webserver2 [ address = "203.0.113.22, 10.0.2.4"];
}
network api_network {
address = "10.0.3.0/24"
tenant_router [ address = "10.0.3.1" ];
api1 [ address = "10.0.3.3" ];
api2 [ address = "10.0.3.4" ];
}
network worker_network {
address = "10.0.1.0/24"
tenant_router [ address = "10.0.1.1" ];
worker1 [ address = "10.0.1.5" ];
worker2 [ address = "10.0.1.6" ];
}
}
Next Steps
----------
You should now be fairly confident working with the Networking API.
There are several calls we did not cover. To see these and more,
refer to the networking documentation of your SDK, or try a different step in the tutorial, including:
* :doc:`/section8` - for advice for developers new to operations
* :doc:`/section9` - to see all the crazy things we think ordinary folks won't want to do ;)

View File

@ -0,0 +1,119 @@
======================================================
Section Eight: Advice for Developers new to Operations
======================================================
In this section, we will introduce some operational concepts and tasks which may
be new to developers who have not written cloud applications before.
Monitoring
----------
Monitoring is essential for cloud applications, especially if the application is
to be 'scalable'. You must know how many requests are coming in, and what impact
that has on the various services -- in other words, enough information to determine whether you
should start another worker or API service as we did in :doc:`/section3`.
.. todo:: explain how to achieve this kind of monitoring. Ceilometer? (STOP LAUGHING.)
Aside from this kind of monitoring, you should consider availability monitoring.
Does your application care about a worker going down? Maybe not. Does it care
about a failed database server? Probably yes.
One great pattern to add this to your application is the
`Health Endpoint Monitoring Pattern <https://msdn.microsoft.com/en-us/library/dn589789.aspx>`_,
where a special API endpoint is introduced to your application for a basic
health check.
Backups
-------
Where instances store information that is not reproducible (such as a database
server, a file server, or even log files for an application), it is important to
back up this information as you would a normal non-cloud server. It sounds
simple, but just because it is 'in the cloud' does not mean it has any additional
robustness or resilience when it comes to failure of the underlying hardware or systems.
OpenStack provides a couple of tools that make it easier to perform backups. If
your provider runs OpenStack Object Storage, this is normally extremely robust
and has several handy API calls and CLI tools for working with archive files.
It is also possible to create snapshots of running instances and persistent
volumes using the OpenStack API. Refer to the documentation of your SDK for
more.
.. todo:: link to appropriate documentation, or better yet, link and also include the commands here
While the technical action to perform backups can be straightforward, you should
also think about your policies regarding what is backed up and how long each item
should be retained.
Phoenix Servers
---------------
Application developers and operators who employ
`Phoenix Servers <http://martinfowler.com/bliki/PhoenixServer.html>`_
have built systems that start from a known baseline (sometimes just a specific
version of an operating system) and have built tooling that will automatically
build, install, and configure a system with no manual intervention.
Phoenix Servers, named for the mythological bird that would live its life,
be consumed by fire, then rise from the ashes to live again, make it possible
to easily "start over" with new instances.
If your application is automatically deployed on a regular basis, resolving outages and
security updates are not special operations that require manual intervention.
If you suffer an outage, provision more resources in another region. If you have
to patch security holes, provision more compute nodes that will be built with
the updated/patched software, then terminate vulnerable nodes, with traffic
automatically failing over to the new instances.
Security
--------
Security-wise, one thing to keep in mind is that if one instance of an application
is compromised, all instances with the same image and configuration are likely
to suffer the same vulnerability. In this case, it is safer to rebuild all of your
instances (a task made easier by configuration management - see below).
Configuration Management
------------------------
Tools such as Ansible, Chef, and Puppet allow you to describe exactly what should
be installed on an instance and how it should be configured. Using these
descriptions, the tool implements any changes required to get to the desired state.
These tools vastly reduce the amount of effort it takes to work with large numbers of servers,
and also improves the ability to recreate, update, move, or distribute applications.
Application Deployment
----------------------
Related to configuration management is the question of how you deploy your application.
For example, do you:
* pull the latest code from a source control repository?
* make packaged releases that update infrequently?
* big-bang test in a development environment and deploy only after major changes?
One of the latest trends in deploying scalable cloud applications is
`continuous integration <http://en.wikipedia.org/wiki/Continuous_integration>`_ /
`continuous deployment <http://en.wikipedia.org/wiki/Continuous_delivery>`_ (CI/CD).
Working in a CI/CD fashion means
you are always testing your application and making frequent deployments to
production.
In this tutorial, we have downloaded the latest version of our application
from source and installed it on a standard image. Our magic install script also
updates the standard image to have the latest dependencies we need to run the
application.
Another approach to this is to create a 'gold' image - one that has your
application and dependencies pre-installed. This means faster boot times and
a higher degree of control over what is on the instance, however a process is
needed to ensure that 'gold' images do not fall behind on security updates.
Fail Fast
---------

View File

@ -0,0 +1,60 @@
=========================
Section Nine: Going Crazy
=========================
In this section, we will look at further options for expanding the sample application.
Regions and geographic diversity
--------------------------------
.. note:: For more information on multi-site clouds, check out the `Multi-Site chapter <http://docs.openstack.org/arch-design/content/multi_site.html>`_ of the Architecture Design Guide.
OpenStack supports the concept of 'Regions' - usually geographically separated installations that are
all connected to the one service catalogue. This section explains how to expand the Fractal app
to use multiple regions for high availability.
.. note:: This section is incomplete. Please help us finish it!
Multiple clouds
---------------
.. note:: For more information on hybrid-clouds, check out the `Hybrid Cloud chapter <http://docs.openstack.org/arch-design/content/hybrid.html>`_ of the Architecture Design Guide
Sometimes, you want to use multiple clouds, such as a private cloud inside your organisation
and a public cloud. This section attempts to do exactly that.
.. note:: This section is incomplete. Please help us finish it!
High Availability
-----------------
Using Pacemaker to look at the API.
.. note:: This section is incomplete. Please help us finish it!
conf.d, etc.d
-------------
Use conf.d and etc.d.
In earlier sections, the Fractal Application uses an install script, with parameters passed in from the metadata API,
in order to bootstrap the cluster. `Etcd <https://github.com/coreos/etcd>`_ is "a distributed, consistent key value store for shared configuration and service discovery"
that can be used for storing configuration. Updated versions of the Fractal worker
component could be written to connect to Etcd, or use `Confd <https://github.com/kelseyhightower/confd>`_ which will
poll for changes from Etcd and write changes to a configuration file on the local filesystem, which the Fractal worker
could use for configuration.
Using Swift instead of a database
---------------------------------
We haven't quite figured out how to do this yet, but the general steps involve changing the fractal upload
code to store metadata with the object in swift, then changing the API code such as "list fractals" to
query swift to retrieve the metadata. If you do this, you should be able to stop using a database.
.. note:: This section is incomplete. Please help us finish it!
Next Steps
----------
Wow, if you've made it through this section, you know more about
working with OpenStack clouds than the authors of this guide.
Perhaps you can `contribute <https://wiki.openstack.org/wiki/Documentation/HowTo>`_?

View File

@ -0,0 +1,55 @@
# step-1
require 'fog'

# Replace these with the credentials for your cloud.
auth_username = 'your_auth_username'
auth_password = 'your_auth_password'
auth_url = 'http://controller:5000'
project_name = 'your_project_name_or_id'
region_name = 'your_region_name'

# Connect to the OpenStack Compute (Nova) service through fog,
# authenticating against the Keystone v2.0 tokens endpoint.
# BUG FIX: the original sample omitted the commas between the
# hash entries, which is a Ruby syntax error.
conn = Fog::Compute.new({
    :provider           => 'openstack',
    :openstack_auth_url => auth_url + '/v2.0/tokens',
    :openstack_username => auth_username,
    :openstack_tenant   => project_name,
    :openstack_api_key  => auth_password,
})
# step-2
# List every image visible to this project; `.body` is the raw
# API response hash.
images = conn.list_images
print images.body

# step-3
# List the available flavors (hardware profiles).
flavors = conn.list_flavors
print flavors.body

# step-4
# Fetch a single image by ID -- replace with an ID from your cloud.
image_id = '2cccbea0-cea9-4f86-a3ed-065c652adda5'
image = conn.images.get image_id
print image
# step-5
# Fetch a single flavor by ID. BUG FIX: the original assigned the
# result to `image` and called `conn.flavor` (fog exposes the
# collection as `conn.flavors`), so the following `print flavor`
# referenced an undefined variable.
flavor_id = '3'
flavor = conn.flavors.get flavor_id
print flavor
# step-6
# Boot a test instance from the flavor and image looked up above.
instance_name = 'testing'
testing_instance = conn.servers.create(:name => instance_name, :flavor_ref => flavor.id, :image_ref => image.id)

# step-7
# List all servers in the project.
conn.servers

# step-8
# Delete the test instance again.
testing_instance.destroy

# step-9
# step-10
# Create a security group for the all-in-one instance and open
# HTTP (80) and SSH (22). BUG FIX: the original omitted the commas
# between the method arguments, which is a Ruby syntax error.
all_in_one_security_group = conn.create_security_group 'all-in-one', 'network access for all-in-one application.'
conn.create_security_group_rule all_in_one_security_group, 'TCP', 80, 80
conn.create_security_group_rule all_in_one_security_group, 'TCP', 22, 22

# step-11
# step-12
# step-13

View File

@ -0,0 +1,119 @@
# step-1
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver

# Replace these with the credentials for your cloud.
auth_username = 'your_auth_username'
auth_password = 'your_auth_password'
auth_url = 'http://controller:5000'
project_name = 'your_project_name_or_id'
region_name = 'your_region_name'

# Connect to the OpenStack compute service using the Keystone
# v2.0 password authentication flow.
provider = get_driver(Provider.OPENSTACK)
conn = provider(auth_username,
                auth_password,
                ex_force_auth_url=auth_url,
                ex_force_auth_version='2.0_password',
                ex_tenant_name=project_name,
                ex_force_service_region=region_name)
# step-2
# List every image visible to this project.
images = conn.list_images()
for image in images:
    print(image)

# step-3
# List the available flavors (libcloud calls them "sizes").
flavors = conn.list_sizes()
for flavor in flavors:
    print(flavor)

# step-4
# Look up one image by ID -- replace with an ID from your cloud.
image_id = '2cccbea0-cea9-4f86-a3ed-065c652adda5'
image = conn.get_image(image_id)
print(image)

# step-5
# Look up one flavor by ID.
flavor_id = '3'
flavor = conn.ex_get_size(flavor_id)
print(flavor)

# step-6
# Boot a test instance from the chosen image and flavor.
instance_name = 'testing'
testing_instance = conn.create_node(name=instance_name, image=image, size=flavor)
print(testing_instance)

# step-7
# List all instances in the project.
instances = conn.list_nodes()
for instance in instances:
    print(instance)

# step-8
# Clean up the test instance again.
conn.destroy_node(testing_instance)
# step-9
# Import a public SSH key as a keypair, unless one with the same
# name is already registered on the cloud.
print('Checking for existing SSH key pair...')
keypair_name = 'demokey'
pub_key_file = '~/.ssh/id_rsa.pub'
keypair_exists = False
for keypair in conn.list_key_pairs():
    if keypair.name == keypair_name:
        keypair_exists = True

if keypair_exists:
    print('Keypair already exists. Skipping import.')
else:
    print('adding keypair...')
    conn.import_key_pair_from_file(keypair_name, pub_key_file)

for keypair in conn.list_key_pairs():
    print(keypair)
# step-10
# Create (or re-use) a security group that opens HTTP (80) and
# SSH (22) for the all-in-one instance.
security_group_exists = False
for security_group in conn.ex_list_security_groups():
    if security_group.name =='all-in-one':
        all_in_one_security_group = security_group
        security_group_exists = True

if security_group_exists:
    print('Security Group already exists. Skipping creation.')
else:
    all_in_one_security_group = conn.ex_create_security_group('all-in-one', 'network access for all-in-one application.')
    conn.ex_create_security_group_rule(all_in_one_security_group, 'TCP', 80, 80)
    conn.ex_create_security_group_rule(all_in_one_security_group, 'TCP', 22, 22)
# step-11
userdata = '''#!/usr/bin/env bash
curl -L -s https://git.openstack.org/cgit/stackforge/faafo/plain/contrib/install.sh | bash -s -- \
-i faafo -i messaging -r api -r worker -r demo
'''
# step-12
instance_name = 'all-in-one'
testing_instance = conn.create_node(name=instance_name,
image=image,
size=flavor,
ex_keyname=keypair_name,
ex_userdata=userdata,
ex_security_groups=[all_in_one_security_group])
conn.wait_until_running([testing_instance])
# step-13
# Re-use a free floating IP if the project already has one,
# otherwise allocate a new one from the first pool.
# BUG FIX: the original tested `if floating_ip.node_id:`, which
# picks an IP that is already attached to an instance. An unused
# floating IP is one whose node_id is empty -- compare the
# identical loops elsewhere in this guide's samples.
print('Checking for unused Floating IP...')
unused_floating_ip = None
for floating_ip in conn.ex_list_floating_ips():
    if not floating_ip.node_id:
        unused_floating_ip = floating_ip
        break

if not unused_floating_ip:
    pool = conn.ex_list_floating_ip_pools()[0]
    print('Allocating new Floating IP from pool: {}'.format(pool))
    unused_floating_ip = pool.create_floating_ip()

# step-14
# Attach the floating IP so the instance is reachable from
# outside the cloud.
conn.ex_attach_floating_ip_to_node(testing_instance, unused_floating_ip)

# step-15
print('The Fractals app will be deployed to http://%s' % unused_floating_ip.ip_address)

View File

@ -0,0 +1,133 @@
# step-1
userdata = '''#!/usr/bin/env bash
curl -L -s https://git.openstack.org/cgit/stackforge/faafo/plain/contrib/install.sh | bash -s -- \
-i faafo -i messaging -r api -r worker -r demo
'''
instance_name = 'all-in-one'
testing_instance = conn.create_node(name=instance_name,
image=image,
size=flavor,
ex_keyname=keypair_name,
ex_userdata=userdata,
ex_security_groups=[all_in_one_security_group])
# step-2
userdata = '''#!/usr/bin/env bash
curl -L -s https://git.openstack.org/cgit/stackforge/faafo/plain/contrib/install.sh | bash -s -- \
-i messaging -i faafo -r api -r worker -r demo
'''
# step-3
all_in_one_security_group = conn.ex_create_security_group('all-in-one', 'network access for all-in-one application.')
conn.ex_create_security_group_rule(all_in_one_security_group, 'TCP', 80, 80)
conn.ex_create_security_group_rule(all_in_one_security_group, 'TCP', 22, 22)
# step-4
conn.ex_list_security_groups()
# step-5
conn.ex_delete_security_group_rule(rule)
conn.ex_delete_security_group(security_group)
# step-6
conn.ex_get_node_security_groups(testing_instance)
# step-7
# Look for a floating IP that is not attached to any instance.
unused_floating_ip = None
for floating_ip in conn.ex_list_floating_ips():
    if not floating_ip.node_id:
        unused_floating_ip = floating_ip
        print("Found an unused Floating IP: %s" % floating_ip)
        break

# step-8
# Pick the first floating-IP pool the cloud offers.
pool = conn.ex_list_floating_ip_pools()[0]

# step-9
# Allocate a fresh floating IP from that pool.
unused_floating_ip = pool.create_floating_ip()

# step-10
# Attach the floating IP to an instance.
conn.ex_attach_floating_ip_to_node(instance, unused_floating_ip)
# step-11
# Split the app over two tiers: a controller running the API and
# messaging services, and workers. Workers only need SSH; the
# controller additionally exposes HTTP and accepts AMQP (5672)
# connections from members of the worker group.
# NOTE(review): 'note' in the description strings looks like a
# typo for 'node'; left untouched because it is runtime data.
worker_group = conn.ex_create_security_group('worker', 'for services that run on a worker note')
conn.ex_create_security_group_rule(worker_group, 'TCP', 22, 22)

controller_group = conn.ex_create_security_group('control', 'for services that run on a control note')
conn.ex_create_security_group_rule(controller_group, 'TCP', 22, 22)
conn.ex_create_security_group_rule(controller_group, 'TCP', 80, 80)
conn.ex_create_security_group_rule(controller_group, 'TCP', 5672, 5672, source_security_group=worker_group)

# Install only the API and messaging services on the controller.
userdata = '''#!/usr/bin/env bash
curl -L -s http://git.openstack.org/cgit/stackforge/faafo/plain/contrib/install.sh | bash -s -- \
    -i messaging -i faafo -r api
'''
instance_controller_1 = conn.create_node(name='app-controller',
                                         image=image,
                                         size=flavor,
                                         ex_keyname='demokey',
                                         ex_userdata=userdata,
                                         ex_security_groups=[controller_group])
conn.wait_until_running([instance_controller_1])

# Give the controller a floating IP, re-using a free one when
# possible.
print('Checking for unused Floating IP...')
unused_floating_ip = None
for floating_ip in conn.ex_list_floating_ips():
    if not floating_ip.node_id:
        unused_floating_ip = floating_ip
        break

if not unused_floating_ip:
    pool = conn.ex_list_floating_ip_pools()[0]
    print('Allocating new Floating IP from pool: {}'.format(pool))
    unused_floating_ip = pool.create_floating_ip()

conn.ex_attach_floating_ip_to_node(instance_controller_1, unused_floating_ip)
print('Application will be deployed to http://%s' % unused_floating_ip.ip_address)
# step-12
# Refresh the controller's details so its address lists are
# populated.
instance_controller_1 = conn.ex_get_node_details(instance_controller_1.id)

# BUG FIX: the original tested `public_ips` but then read
# `private_ips` (and vice versa in the else branch), raising
# IndexError whenever the instance had no public address. Prefer
# the fixed (private) address and fall back to the public one.
if instance_controller_1.private_ips:
    ip_controller = instance_controller_1.private_ips[0]
else:
    ip_controller = instance_controller_1.public_ips[0]

# Point the worker at the controller's API endpoint and message
# queue.
userdata = '''#!/usr/bin/env bash
curl -L -s http://git.openstack.org/cgit/stackforge/faafo/plain/contrib/install.sh | bash -s -- \
    -i faafo -r worker -e 'http://%(ip_controller)s' -m 'amqp://guest:guest@%(ip_controller)s:5672/'
''' % {'ip_controller': ip_controller}
# Boot the first worker in the worker security group.
instance_worker_1 = conn.create_node(name='app-worker-1',
                                     image=image,
                                     size=flavor,
                                     ex_keyname='demokey',
                                     ex_userdata=userdata,
                                     ex_security_groups=[worker_group])
conn.wait_until_running([instance_worker_1])

# Attach a floating IP so the worker is reachable over SSH,
# re-using a free one when possible.
print('Checking for unused Floating IP...')
unused_floating_ip = None
for floating_ip in conn.ex_list_floating_ips():
    if not floating_ip.node_id:
        unused_floating_ip = floating_ip
        break

if not unused_floating_ip:
    pool = conn.ex_list_floating_ip_pools()[0]
    print('Allocating new Floating IP from pool: {}'.format(pool))
    unused_floating_ip = pool.create_floating_ip()

conn.ex_attach_floating_ip_to_node(instance_worker_1, unused_floating_ip)
print('The worker will be available for SSH at %s' % unused_floating_ip.ip_address)

# step-13
# The worker's fixed (private) address.
ip_instance_worker_1 = instance_worker_1.private_ips[0]
print(ip_instance_worker_1)

# step-14

View File

@ -0,0 +1,110 @@
# step-1
# Tear down the instances and security groups created in earlier
# sections so this section starts from a clean slate.
for instance in conn.list_nodes():
    if instance.name in ['all-in-one','app-worker-1', 'app-worker-2', 'app-controller']:
        print('Destroying Instance: %s' % instance.name)
        conn.destroy_node(instance)

for group in conn.ex_list_security_groups():
    if group.name in ['control', 'worker', 'api', 'services']:
        print('Deleting security group: %s' % group.name)
        conn.ex_delete_security_group(group)
# step-2
# One security group per tier:
#   api      - HTTP from anywhere, SSH
#   worker   - SSH only
#   control  - SSH, HTTP, AMQP (5672) from workers
#   services - SSH, MySQL (3306) from the API tier, AMQP (5672)
#              from both workers and the API tier
api_group = conn.ex_create_security_group('api', 'for API services only')
conn.ex_create_security_group_rule(api_group, 'TCP', 80, 80)
conn.ex_create_security_group_rule(api_group, 'TCP', 22, 22)

worker_group = conn.ex_create_security_group('worker', 'for services that run on a worker note')
conn.ex_create_security_group_rule(worker_group, 'TCP', 22, 22)

controller_group = conn.ex_create_security_group('control', 'for services that run on a control note')
conn.ex_create_security_group_rule(controller_group, 'TCP', 22, 22)
conn.ex_create_security_group_rule(controller_group, 'TCP', 80, 80)
conn.ex_create_security_group_rule(controller_group, 'TCP', 5672, 5672, source_security_group=worker_group)

services_group = conn.ex_create_security_group('services', 'for DB and AMQP services only')
conn.ex_create_security_group_rule(services_group, 'TCP', 22, 22)
conn.ex_create_security_group_rule(services_group, 'TCP', 3306, 3306, source_security_group=api_group)
conn.ex_create_security_group_rule(services_group, 'TCP', 5672, 5672, source_security_group=worker_group)
conn.ex_create_security_group_rule(services_group, 'TCP', 5672, 5672, source_security_group=api_group)
# step-3
def get_floating_ip(conn):
    '''Return an unattached Floating IP, allocating a fresh one if needed.

    Re-uses the first address from the cloud that is not bound to any node;
    only when none is free does it ask the first pool for a new allocation.
    '''
    # Prefer recycling: pick the first address with no node attached.
    candidate = next(
        (ip for ip in conn.ex_list_floating_ips() if not ip.node_id), None)
    if candidate:
        return candidate
    # Nothing free -- allocate a new address from the first available pool.
    return conn.ex_list_floating_ip_pools()[0].create_floating_ip()
# step-4
# cloud-init user data: install the database and messaging services at boot.
userdata = '''#!/usr/bin/env bash
curl -L -s http://git.openstack.org/cgit/stackforge/faafo/plain/contrib/install.sh | bash -s -- \
-i database -i messaging
'''

# Boot the shared-services instance and block until it is running.
instance_services = conn.create_node(name='app-services',
                                     image=image,
                                     size=flavor,
                                     ex_keyname='demokey',
                                     ex_userdata=userdata,
                                     ex_security_groups=[services_group])
instance_services = conn.wait_until_running([instance_services])[0][0]
# Private address the API instances use to reach MySQL and RabbitMQ.
services_ip = instance_services.private_ips[0]

# step-5
# User data for the API tier: point the faafo API role at the messaging and
# database endpoints on the services instance.
userdata = '''#!/usr/bin/env bash
curl -L -s http://git.openstack.org/cgit/stackforge/faafo/plain/contrib/install.sh | bash -s -- \
-i faafo -r api -m 'amqp://guest:guest@%(services_ip)s:5672/' \
-d 'mysql://faafo:password@%(services_ip)s:3306/faafo'
''' % { 'services_ip': services_ip }

# Two API instances for redundancy, built from the same user data.
instance_api_1 = conn.create_node(name='app-api-1',
                                  image=image,
                                  size=flavor,
                                  ex_keyname='demokey',
                                  ex_userdata=userdata,
                                  ex_security_groups=[api_group])
instance_api_2 = conn.create_node(name='app-api-2',
                                  image=image,
                                  size=flavor,
                                  ex_keyname='demokey',
                                  ex_userdata=userdata,
                                  ex_security_groups=[api_group])
instance_api_1 = conn.wait_until_running([instance_api_1])[0][0]
api_1_ip = instance_api_1.private_ips[0]
instance_api_2 = conn.wait_until_running([instance_api_2])[0][0]
api_2_ip = instance_api_2.private_ips[0]

# Give each API instance a public (floating) address.
for instance in [instance_api_1, instance_api_2]:
    floating_ip = get_floating_ip(conn)
    conn.ex_attach_floating_ip_to_node(instance, floating_ip)
    print('allocated %(ip)s to %(host)s' % {'ip': floating_ip.ip_address, 'host': instance.name})

# step-6
# Worker user data: workers talk to the first API instance and to the
# message queue on the services instance.
# NOTE(review): only api_1_ip is used here; presumably load balancing across
# both APIs comes in a later chapter -- confirm.
userdata = '''#!/usr/bin/env bash
curl -L -s http://git.openstack.org/cgit/stackforge/faafo/plain/contrib/install.sh | bash -s -- \
-i faafo -r worker -e 'http://%(api_1_ip)s' -m 'amqp://guest:guest@%(services_ip)s:5672/'
''' % {'api_1_ip': api_1_ip, 'services_ip': services_ip}

# Three identical workers to parallelize fractal generation.
instance_worker_1 = conn.create_node(name='app-worker-1',
                                     image=image, size=flavor,
                                     ex_keyname='demokey',
                                     ex_userdata=userdata,
                                     ex_security_groups=[worker_group])
instance_worker_2 = conn.create_node(name='app-worker-2',
                                     image=image, size=flavor,
                                     ex_keyname='demokey',
                                     ex_userdata=userdata,
                                     ex_security_groups=[worker_group])
instance_worker_3 = conn.create_node(name='app-worker-3',
                                     image=image, size=flavor,
                                     ex_keyname='demokey',
                                     ex_userdata=userdata,
                                     ex_security_groups=[worker_group])
# step-7

View File

@ -0,0 +1,93 @@
# step-1
# Build a libcloud Swift (object storage) driver from Identity v2
# password credentials; the placeholder values below are meant to be
# replaced by the reader.
from libcloud.storage.types import Provider
from libcloud.storage.providers import get_driver

auth_username = 'your_auth_username'
auth_password = 'your_auth_password'
auth_url = 'http://controller:5000'
project_name = 'your_project_name_or_id'
region_name = 'your_region_name'

provider = get_driver(Provider.OPENSTACK_SWIFT)
swift = provider(auth_username,
                 auth_password,
                 ex_force_auth_url=auth_url,
                 ex_force_auth_version='2.0_password',
                 ex_tenant_name=project_name,
                 ex_force_service_region=region_name)

# step-2
# Create the container that will hold the fractal images.
container_name = 'fractals'
container = swift.create_container(container_name=container_name)
print(container)

# step-3
# List every container in the account to confirm creation.
print(swift.list_containers())
# step-4
# Upload a local file into the container under a human-readable object name.
# NOTE: the sample deliberately names the variable `object` (shadowing the
# builtin) to match the guide text.
file_path = 'goat.jpg'
object_name = 'an amazing goat'
container = swift.get_container(container_name=container_name)
object = container.upload_object(file_path=file_path, object_name=object_name)

# step-5
# List the objects now stored in the container.
objects = container.list_objects()
print(objects)

# step-6
# Fetch the object's metadata back from Swift.
object = swift.get_object(container_name, object_name)
# Fixed: this was a Python 2 `print object` statement, inconsistent with
# the print() function used everywhere else in this sample.
print(object)

# step-7
# Compute the local file's MD5 so it can be compared with the hash Swift
# reports for the uploaded object.
import hashlib
print(hashlib.md5(open('goat.jpg', 'rb').read()).hexdigest())

# step-8
# Remove the object from the container.
swift.delete_object(object)

# step-9
# Listing again shows the object is gone.
objects = container.list_objects()
print(objects)

# step-10
# Re-fetch the container handle for the next section.
container_name = 'fractals'
container = swift.get_container(container_name)
# step-11
# Pull every fractal out of the application's REST API and stream each one
# into the container, keyed by the fractal's UUID.
import base64
# NOTE(review): cStringIO (and base64) appear unused in this sample --
# confirm whether they can be dropped; cStringIO is also Python-2 only.
import cStringIO
import json
import requests

# Replace IP_API_1 with the floating IP of an API instance.
endpoint = 'http://IP_API_1'
# results_per_page=-1 asks the API for an unpaginated, complete listing.
params = { 'results_per_page': '-1' }
response = requests.get('%s/v1/fractal' % endpoint, params=params)
data = json.loads(response.text)
for fractal in data['objects']:
    response = requests.get('%s/fractal/%s' % (endpoint, fractal['uuid']), stream=True)
    container.upload_object_via_stream(response.iter_content(), object_name=fractal['uuid'])

for object in container.list_objects():
    print(object)

# step-12
# Clean up: a container must be emptied before it can be deleted.
for object in container.list_objects():
    container.delete_object(object)
swift.delete_container(container)

# step-13
# Upload with custom metadata attached to the object.
file_path = 'goat.jpg'
object_name = 'backup_goat.jpg'
extra = {'meta_data': {'description': 'a funny goat', 'created': '2015-06-02'}}
with open('goat.jpg', 'rb') as iterator:
    object = swift.upload_object_via_stream(iterator=iterator,
                                            container=container,
                                            object_name=object_name,
                                            extra=extra)

# step-14
# Large-object upload: split into 32 MiB chunks (33554432 bytes).
swift.ex_multipart_upload_object(file_path, container, object_name,
                                 chunk_size=33554432)
# step-15

View File

@ -0,0 +1,23 @@
# Package metadata consumed by pbr (see setup.py).
[metadata]
name = OpenStack First Application
summary = OpenStack First Application
description-file =
    README.rst
author = OpenStack Documentation
author-email = openstack-docs@lists.openstack.org
home-page = http://docs.openstack.org/
classifier =
    Intended Audience :: Developers
    License :: OSI Approved :: Apache Software License
    Operating System :: POSIX :: Linux

# Configuration for `python setup.py build_sphinx`.
[build_sphinx]
all_files = 1
build-dir = doc/build
source-dir = doc/source

[pbr]
# Treat Sphinx warnings as errors when building docs through pbr.
warnerrors = True

[wheel]
universal = 1

6
openstack-firstapp/setup.py Executable file
View File

@ -0,0 +1,6 @@
#!/usr/bin/env python
import setuptools
setuptools.setup(
setup_requires=['pbr'],
pbr=True)

View File

@ -2,3 +2,13 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
openstack-doc-tools>=0.23
Pygments
docutils<=0.9.1
sphinx>=1.1.2,<1.2
pbr>=0.6,!=0.7,<1.0
oslosphinx
openstackdocstheme
nwdiag
blockdiag
sphinxcontrib-blockdiag
sphinxcontrib-nwdiag

20
tox.ini
View File

@ -69,3 +69,23 @@ commands = doc-tools-check-languages doc-tools-check-languages.conf test {posarg
sitepackages=True
whitelist_externals = doc-tools-check-languages
commands = doc-tools-check-languages doc-tools-check-languages.conf publish all
# Per-SDK build environments for the "Writing your First OpenStack
# Application" guide. Each passes a different Sphinx tag (-t) so that the
# matching ".. only:: <sdk>" sections of the RST are included in the build.
[testenv:openstack-firstapp-libcloud]
commands = sphinx-build -E -W -t libcloud openstack-firstapp/doc/source openstack-firstapp/build/html

[testenv:openstack-firstapp-jclouds]
commands = sphinx-build -E -W -t jclouds openstack-firstapp/doc/source openstack-firstapp/build/html

[testenv:openstack-firstapp-fog]
commands = sphinx-build -E -W -t fog openstack-firstapp/doc/source openstack-firstapp/build/html

[testenv:openstack-firstapp-dotnet]
commands = sphinx-build -E -W -t dotnet openstack-firstapp/doc/source openstack-firstapp/build/html

[testenv:openstack-firstapp-node]
commands = sphinx-build -E -W -t node openstack-firstapp/doc/source openstack-firstapp/build/html

[testenv:openstack-firstapp-openstacksdk]
commands = sphinx-build -E -W -t openstacksdk openstack-firstapp/doc/source openstack-firstapp/build/html

[testenv:openstack-firstapp-todos]
# NOTE(review): this environment reuses the libcloud tag -- confirm whether
# a dedicated "todos" tag (or an extra tag alongside libcloud) was intended.
commands = sphinx-build -E -W -t libcloud openstack-firstapp/doc/source openstack-firstapp/build/html