Initial upload

This commit is contained in:
Christian Berendt 2014-12-03 09:25:41 +01:00
parent f11b5872ac
commit fd533dba7e
42 changed files with 1603 additions and 0 deletions

7
.gitignore vendored Normal file
View File

@ -0,0 +1,7 @@
*.box
config.yaml
*.log
.vagrant
*.vdi
.tox
doc/build

6
README.rst Normal file
View File

@ -0,0 +1,6 @@
packstack-vagrant
=================
This is a `Vagrant <https://www.vagrantup.com>`__ environment providing
a `Packstack <https://github.com/stackforge/packstack>`__ installation
on top of `CentOS <http://www.centos.org>`__.

88
Vagrantfile vendored Normal file
View File

@ -0,0 +1,88 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
require 'yaml'

# Load the user-provided deployment configuration (config.yaml, copied from
# config.yaml.sample) into the global CONFIG hash exactly once.
unless defined? CONFIG
  configuration_file = File.join(File.dirname(__FILE__), "config.yaml")
  # YAML.load_file opens and closes the file itself; the previous
  # File.open(...).read variant never closed the file handle.
  CONFIG = YAML.load_file(configuration_file)
end
# Create a VDI disk image and attach it to the node's storage controller
# (CONFIG['box_storage_controller']).
#
# node - the Vagrant machine definition to extend
# port - controller port number (also makes the disk file name unique)
# size - disk size passed straight to VBoxManage createhd
def add_block_device(node, port, size)
  node.vm.provider "virtualbox" do |provider|
    disk_image = "#{node.vm.hostname}_#{port}.vdi"
    provider.customize ['createhd', '--filename', disk_image, '--size', size]
    provider.customize ['storageattach', :id,
                        '--storagectl', CONFIG['box_storage_controller'],
                        '--port', port, '--device', 0,
                        '--type', 'hdd', '--medium', disk_image]
  end
end
# Top-level Vagrant configuration for the Packstack multi-node environment.
# Node layout: one 'template' box, N compute nodes, the network/storage/shared
# service nodes, an NFS server and the (primary) controller node.
Vagrant.configure(2) do |config|
# Default box name; the 'template' node below overrides it with CONFIG['box'].
config.vm.box = 'packstack-template'
# The default /vagrant synced folder is not needed on any node.
config.vm.synced_folder '.', '/vagrant', disabled: true
# Baseline VirtualBox resources for every node, taken from config.yaml.
config.vm.provider "virtualbox" do |vb|
vb.customize ["modifyvm", :id, "--memory", CONFIG['resources']['memory']]
vb.customize ["modifyvm", :id, "--cpus", CONFIG['resources']['vcpus']]
# Page fusion de-duplicates identical memory pages across the VMs.
vb.customize ["modifyvm", :id, "--pagefusion", "on"]
end
# Every node is provisioned with the same Ansible playbook.
config.vm.provision "ansible" do |ansible|
ansible.playbook = "playbook.yaml"
end
# Optional package caching via the vagrant-cachier plugin (see the docs).
if CONFIG['use_cache'] and Vagrant.has_plugin?("vagrant-cachier")
config.cache.scope = :box
end
# Plain node based directly on the configured base box.
config.vm.define 'template' do |node|
node.vm.box = CONFIG['box']
node.vm.hostname = 'template'
end
# One compute node per address under CONFIG['address']['compute'], named
# compute1, compute2, ... each with a private "tunnel" network address of
# 10.0.0.2<index> (compute1 -> 10.0.0.20, compute2 -> 10.0.0.21, ...).
CONFIG['address']['compute'].each_with_index do |address, index|
name = "compute#{index + 1}"
config.vm.define name do |node|
node.vm.hostname= name
node.vm.network :public_network, ip: address, netmask: CONFIG['netmask_internal'], bridge: CONFIG['bridge_internal']
node.vm.network :private_network, ip: "10.0.0.2#{index}", virtualbox__intnet: "tunnel"
end
end
# Single-instance service nodes. The network node additionally gets a
# tunnel-network endpoint (10.0.0.30) and an unconfigured NIC on the
# external bridge (used for br-ex -- see ansible/network.yaml).
['network', 'storage', 'shared'].each do |name|
config.vm.define name do |node|
node.vm.hostname = name
node.vm.network :public_network, ip: CONFIG['address'][name], netmask: CONFIG['netmask_internal'], bridge: CONFIG['bridge_internal']
if name == 'network'
node.vm.network :private_network, ip: "10.0.0.30", virtualbox__intnet: "tunnel"
node.vm.network :public_network, bridge: CONFIG['bridge_external'], auto_config: false
end
end
end
# NFS server with one extra block device (formatted and exported by
# ansible/nfs.yaml).
config.vm.define "nfs" do |node|
node.vm.hostname= "nfs"
node.vm.network :public_network, ip: CONFIG['address']['nfs'], netmask: CONFIG['netmask_internal'], bridge: CONFIG['bridge_internal']
add_block_device(node, 1, CONFIG['resources']['storage'])
end
# Controller (primary node for plain `vagrant ssh`): doubled memory/vcpus
# and two extra block devices (formatted in ansible/controller.yaml).
config.vm.define "controller", primary: true do |node|
node.vm.hostname= "controller"
node.vm.network :public_network, ip: CONFIG['address']['controller'], netmask: CONFIG['netmask_internal'], bridge: CONFIG['bridge_internal']
node.vm.provider "virtualbox" do |vb|
vb.customize ["modifyvm", :id, "--memory", (CONFIG['resources']['memory'] * 2)]
vb.customize ["modifyvm", :id, "--cpus", (CONFIG['resources']['vcpus'] * 2)]
end
add_block_device(node, 1, CONFIG['resources']['storage'])
add_block_device(node, 2, CONFIG['resources']['storage'])
end
end

34
ansible/controller.yaml Normal file
View File

@ -0,0 +1,34 @@
---
# Tasks for the controller node: install Packstack (from packages or, when
# `development` is true, from git master), write the answer file and helper
# scripts, and format the extra block devices.
- yum: name=http://yum.puppetlabs.com/puppetlabs-release-el-7.noarch.rpm state=present
# Development mode: build Packstack from source.
- yum: name=git state=present
when: development
- git: repo=https://github.com/stackforge/packstack
dest=/opt/packstack
when: development
- file: path=/opt/packstack owner=vagrant group=vagrant recurse=yes
when: development
- command: python setup.py install
chdir=/opt/packstack
when: development
- command: python setup.py install_puppet_modules
chdir=/opt/packstack
when: development
# Package mode: install the released openstack-packstack package instead.
- yum: name=openstack-packstack state=present
when: not development
# Credentials and helper scripts for the vagrant user.
- template: src=files/openrc.j2 dest=/home/vagrant/openrc owner=vagrant group=vagrant mode=0644
- lineinfile: dest=/home/vagrant/.bashrc line='source $HOME/openrc'
- copy: src=files/setup.sh dest=/home/vagrant/scripts/setup.sh owner=vagrant group=vagrant mode=0755
- copy: src=files/initialize.sh dest=/home/vagrant/scripts/initialize.sh mode=0755 owner=vagrant group=vagrant
# Format the extra block devices attached by the Vagrantfile.
# NOTE(review): force=yes re-creates the filesystem on each run -- assumed
# acceptable for this disposable test environment; confirm.
- filesystem: fstype=xfs dev=/dev/sdb force=yes
- filesystem: fstype=xfs dev=/dev/sdc force=yes
- template: src=files/packstack.answers.j2 dest=/home/vagrant/packstack.answers owner=vagrant group=vagrant
# Patches applied to the packaged Puppet modules (not needed for git master).
- yum: name=patch state=present
when: not development
- copy: src=patches dest=/home/vagrant owner=vagrant group=vagrant mode=0644
when: not development
- patch: patchfile={{ item }} basedir=/ strip=1
when: not development
with_items:
- /home/vagrant/patches/mongodb.pp
- /home/vagrant/patches/nova_compute_libvirt.pp
- copy: src=files/gitconfig dest=/home/vagrant/.gitconfig owner=vagrant group=vagrant mode=0644

15
ansible/initialize.yaml Normal file
View File

@ -0,0 +1,15 @@
---
# Common initialization tasks for every node: enable the RDO repository,
# update all packages, and distribute the shared SSH key pair plus basic
# host configuration.
- yum: name=deltarpm state=present
- yum: name=https://rdo.fedorapeople.org/rdo-release.rpm state=present
# Update every installed package to the latest available version.
- yum: name=* state=latest
- yum: name=openstack-selinux state=present
- yum: name=vim-enhanced state=present
# Shared key pair (files/id_rsa*) used for password-less SSH between nodes.
- copy: src=files/id_rsa dest=/home/vagrant/.ssh/id_rsa mode=0600 owner=vagrant group=vagrant
- copy: src=files/id_rsa.pub dest=/home/vagrant/.ssh/id_rsa.pub mode=0600 owner=vagrant group=vagrant
- copy: src=files/selinux dest=/etc/selinux/config mode=0644 owner=root group=root
- file: path=/home/vagrant/scripts state=directory owner=vagrant group=vagrant mode=0755
- copy: src=files/disable_network_manager.sh dest=/home/vagrant/scripts/disable_network_manager.sh mode=0755 owner=vagrant group=vagrant
# NOTE(review): this appends on every provisioning run, so repeated runs add
# duplicate authorized_keys entries -- harmless but not idempotent.
- shell: cat /home/vagrant/.ssh/id_rsa.pub >> /home/vagrant/.ssh/authorized_keys
- file: path=/home/vagrant/.ssh/authorized_keys owner=vagrant group=vagrant
- copy: src=files/motd dest=/etc/motd owner=root group=root mode=0644
- template: src=files/hosts.j2 dest=/etc/hosts owner=root group=root mode=0644

6
ansible/network.yaml Normal file
View File

@ -0,0 +1,6 @@
---
# Tasks for the network node: install Open vSwitch and create the external
# bridge (br-ex).
- yum: name=https://rdo.fedorapeople.org/rdo-release.rpm state=present
- yum: name=openvswitch state=present
- service: name=openvswitch state=started enabled=yes
- openvswitch_bridge: bridge=br-ex state=present
# NOTE(review): enp0s10 is presumably the unconfigured NIC the Vagrantfile
# attaches to the external bridge (auto_config: false) -- confirm the
# interface name against the base box.
- openvswitch_port: bridge=br-ex port=enp0s10 state=present

11
ansible/nfs.yaml Normal file
View File

@ -0,0 +1,11 @@
---
# Tasks for the NFS node: format the extra disk, mount it on /export and
# serve it via NFS (export definition comes from files/exports).
- yum: name=nfs-utils state=present
- yum: name=xfsprogs state=present
# NOTE(review): force=yes re-creates the filesystem on each run -- assumed
# acceptable for this disposable test environment; confirm.
- filesystem: fstype=xfs dev=/dev/sdb force=yes
- mount: name=/export src='/dev/sdb' fstype=xfs state=mounted
- copy: src=files/exports dest=/etc/exports mode=0644 owner=root group=root
- service: name=rpcbind state=started enabled=yes
- service: name=nfs-server state=started enabled=yes
- service: name=nfs-lock state=started enabled=yes
- service: name=nfs-idmap state=started enabled=yes
# Re-export everything listed in /etc/exports.
- command: exportfs -a

29
config.yaml.sample Normal file
View File

@ -0,0 +1,29 @@
---
# Sample configuration for the packstack-vagrant environment. Copy this file
# to config.yaml and adjust it (see doc/source/configuration.rst).

# Vagrant base box and the storage controller it provides; the controller
# must offer at least three ports for the extra disks.
box: b1-systems/centos-packstack
box_storage_controller: 'SATA Controller'
# Install Packstack from git master instead of packages when true.
development: false
# Host bridges used for the internal and external networks.
bridge_internal: tap0
bridge_external: tap1
# Neutron L2 agent and tenant network type.
network_agent: openvswitch
network_type: gre
netmask_internal: 255.255.0.0
# Use the vagrant-cachier plugin when it is installed.
use_cache: true
storage_backend: nfs
# Static addresses of the nodes; add more entries under `compute` for
# additional compute nodes.
address:
controller: 10.100.50.10
nfs: 10.100.50.41
network: 10.100.50.30
shared: 10.100.50.50
storage: 10.100.50.40
compute:
- 10.100.50.20
- 10.100.50.21
# NTP servers written into the Packstack answer file.
ntp:
- 0.pool.ntp.org
- 1.pool.ntp.org
- 2.pool.ntp.org
- 3.pool.ntp.org
# Per-node VM resources; the Vagrantfile doubles memory/vcpus for the
# controller and attaches `storage`-sized block devices where needed.
resources:
memory: 4096
vcpus: 2
storage: 65536

43
contrib/config.ssh Normal file
View File

@ -0,0 +1,43 @@
# --- Packstack environment ---
# SSH client configuration for reaching the nodes directly from the host;
# append to ~/.ssh/config. Addresses match config.yaml.sample.
# NOTE(review): presumably files/id_rsa is the key to store as
# ~/.ssh/id_rsa.packstack -- confirm.
Host controller
HostName 10.100.50.10
Port 22
User vagrant
IdentityFile ~/.ssh/id_rsa.packstack
Host compute1
HostName 10.100.50.20
Port 22
User vagrant
IdentityFile ~/.ssh/id_rsa.packstack
Host compute2
HostName 10.100.50.21
Port 22
User vagrant
IdentityFile ~/.ssh/id_rsa.packstack
Host network
HostName 10.100.50.30
Port 22
User vagrant
IdentityFile ~/.ssh/id_rsa.packstack
Host storage
HostName 10.100.50.40
Port 22
User vagrant
IdentityFile ~/.ssh/id_rsa.packstack
Host nfs
HostName 10.100.50.41
Port 22
User vagrant
IdentityFile ~/.ssh/id_rsa.packstack
Host shared
HostName 10.100.50.50
Port 22
User vagrant
IdentityFile ~/.ssh/id_rsa.packstack

50
doc/source/conf.py Executable file
View File

@ -0,0 +1,50 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Sphinx configuration for the packstack-vagrant documentation.
import os
import sys
# Make the repository root importable so autodoc can find the sources.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# oslosphinx supplies the common OpenStack documentation theme.
extensions = [
'sphinx.ext.autodoc',
'oslosphinx'
]
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'packstack-vagrant'
copyright = u'2014, B1 Systems GmbH'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project

View File

@ -0,0 +1,131 @@
Configuration
=============
Copy the sample configuration file ``config.yaml.sample`` to
``config.yaml`` and adjust the file accordingly.
Networking
----------
Bridges
~~~~~~~
::
bridge_internal: tap0
bridge_external: tap1
Addresses
~~~~~~~~~
::
netmask_internal: 255.255.0.0
address:
controller: 10.100.50.10
nfs: 10.100.50.41
network: 10.100.50.30
shared: 10.100.50.50
storage: 10.100.50.40
compute:
- 10.100.50.20
- 10.100.50.21
To increase the number of compute nodes add more addresses to the
``compute`` list.
L2 agent
~~~~~~~~
::
network_agent: openvswitch
Tenant networks
~~~~~~~~~~~~~~~
::
network_type: gre
Clock synchronization (NTP)
---------------------------
Accurate clocks on every node are very important. The default is to use
the `NTP Pool Project <http://www.pool.ntp.org/en/use.html>`__.
::
ntp:
- 0.pool.ntp.org
- 1.pool.ntp.org
- 2.pool.ntp.org
- 3.pool.ntp.org
Resources
---------
Default resources defined in ``~/.vagrant.d/Vagrantfile`` or the
``Vagrantfile`` provided by the base box will be overwritten. Resources
assigned to the controller node will be multiplied by two and additional
block storage devices will be attached to the controller and NFS nodes.
::
resources:
memory: 4096
vcpus: 2
storage: 65536
Development version
-------------------
To use the development version (``master``) of Packstack set
``development: true``.
Vagrant base box
----------------
In theory (not tested) it is possible to use RHEL or Fedora instead of
CentOS. Default is ``b1-systems/centos-packstack``, a customized
Vagrantbox based on
`boxcutter/centos70 <https://github.com/box-cutter/centos-vm>`__. The
sources of the `Packer <https://packer.io/>`__ template used are
available on `GitHub <https://github.com/b1-systems/packer-templates>`__
and on the
`Vagrantcloud <https://vagrantcloud.com/b1-systems>`__.
To change the used Vagrant base box modify the value of ``box``. A list
of publicly available Vagrant boxes is available at
https://vagrantcloud.com.
Depending on the base box used, you have to set the storage
controller (normally ``IDE Controller`` or ``SATA Controller``). The
storage controller of the base box must support at least three ports.
Storage backend
---------------
At the moment NFS is the only supported storage backend. Support for
Ceph will be added in the future (at the moment Ceph is not supported as
a storage backend in Packstack).
Timezone
--------
At the moment it is not possible to configure the timezone with
Packstack.
Caching
-------
To speed up the provisioning the Vagrant plugin
`vagrant-cachier <https://github.com/fgrehm/vagrant-cachier>`__
can be used.
::
$ vagrant plugin install vagrant-cachier
When the plugin is installed, caching is enabled by default. To explicitly
disable caching when the plugin is installed set ``use_cache: false``.

View File

@ -0,0 +1,12 @@
Contributing
============
Details how to contribute are available in the
`OpenStack infrastructure manual <http://docs.openstack.org/infra/manual/developers.html>`__.
Changes must be submitted for review via the Gerrit tool, following the workflow
documented in the
`OpenStack infrastructure manual <http://docs.openstack.org/infra/manual/developers.html#development-workflow>`__.
Bugs and feature requests should be filed on
`Launchpad <https://launchpad.net/packstack-vagrant>`__, not GitHub.

22
doc/source/index.rst Normal file
View File

@ -0,0 +1,22 @@
Welcome to packstack-vagrant's documentation!
=============================================
This is a `Vagrant <https://www.vagrantup.com>`__ environment providing
a `Packstack <https://github.com/stackforge/packstack>`__ installation
on top of `CentOS <http://www.centos.org>`__.
Contents:
.. toctree::
:maxdepth: 2
requirements
configuration
initialization
usage
contributing
Indices and tables
==================
* :ref:`search`

View File

@ -0,0 +1,33 @@
Initialization
==============
First run the ``bootstrap.sh`` script to prepare all required nodes.
::
$ ./scripts/bootstrap.sh
Afterwards run the following command on the controller node
(``vagrant ssh controller``) to deploy OpenStack with Packstack.
::
$ packstack --answer-file packstack.answers
Run ``packstack`` with ``--debug`` to enable debug logging.
::
$ packstack --debug --answer-file packstack.answers
- The installation log file is available at:
``/var/tmp/packstack/YYMMDD-HHMMSS-abcdef/openstack-setup.log``
- The generated manifests are available at:
``/var/tmp/packstack/YYMMDD-HHMMSS-abcdef/manifests``
Optionally you can run the ``setup.sh`` script after a successful
deployment to add cloud images etc.
::
$ ./scripts/setup.sh

1
doc/source/readme.rst Normal file
View File

@ -0,0 +1 @@
.. include:: ../../README.rst

View File

@ -0,0 +1,19 @@
Requirements
============
The installation of Vagrant is documented in the `Vagrant
documentation <https://docs.vagrantup.com/v2/installation/index.html>`__.
The used provisioner is `Ansible <http://www.ansible.com>`__. To be able
to start this Vagrant environment install Ansible on the Vagrant host.
::
$ sudo yum install -y ansible
A helper script (`scripts/get_hosts.py`) requires the Python library
`PyYAML <https://pypi.python.org/pypi/PyYAML/3.11>`__.
::
$ sudo yum install -y PyYAML

30
doc/source/usage.rst Normal file
View File

@ -0,0 +1,30 @@
Usage
=====
Credentials
-----------
- The password for the OpenStack user ``admin`` in the tenant ``admin``
is ``password``.
- The password for the Nagios user ``nagiosadmin`` is ``password``.
Webinterfaces
-------------
- The OpenStack Dashboard is available on the controller node, by
default at http://10.100.50.10/dashboard/.
- The webinterface of Nagios is available on the controller node, by
default at http://10.100.50.10/nagios/.
CLIs
----
All command line interfaces are installed on the controller node.
APIs
----
All OpenStack API services are running on the controller node with the
default IP address ``10.100.50.10``.

5
files/bootstrap.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/sh
# https://openstack.redhat.com/Quickstart
# Run Packstack with the answer file generated from
# files/packstack.answers.j2 (written by the controller playbook).
packstack --answer-file=packstack.answers

View File

@ -0,0 +1,5 @@
#!/bin/sh
# Mark every enp* interface configuration as not managed by NetworkManager
# so the OpenStack networking services control the interfaces themselves.
for f in /etc/sysconfig/network-scripts/ifcfg-enp*; do
    # Quote the expansion so paths are passed to tee as a single argument.
    echo "NM_CONTROLLED=no" | sudo tee -a "$f"
done

1
files/exports Normal file
View File

@ -0,0 +1 @@
/export 10.100.50.0/16(rw,fsid=0,no_subtree_check,sync,no_root_squash)

5
files/gitconfig Normal file
View File

@ -0,0 +1,5 @@
[user]
name = packstack-vagrant
email = vagrant@controller
[push]
default = simple

11
files/hosts.j2 Normal file
View File

@ -0,0 +1,11 @@
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
# <<< Packstack >>>
{% for name in ['controller', 'network', 'storage', 'nfs', 'shared'] %}
{{ address[name] }} {{ name }}
{% endfor %}
{% for ip in address['compute'] %}
{{ ip }} compute{{ loop.index }}
{% endfor %}
# >>> Packstack <<<

27
files/id_rsa Normal file
View File

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEA1nWq2oUYWBkVyQj0izl/9xUvMBAF4mgikJGaPnbPUxByc4/3
NbwMin8BL3za5v3uV7jqMzUP78AF3mLIyaQcAoSRAfpP4gWJ1VihRhJWYQQadTfb
NNoHpB2eCnWhoTt+dQJC/jIV79SCNaufeYgiftH1fzLZNUJBmYZ0xtR+G+bfFbu/
O/4qKRgf7S3hUB5ka9Cq8u38RZm6T1O5k4wxyXvGsI4qleVQFioPSE+z0VFggfn0
6ue4556EALsuvmBdGvg8NerGwAV0VT11kSBLncPdNhn6AqDQx4tFR6nNTnNEQTuE
11pmFMQV+mdNZHQvaIGN972oisKq6FXox5avwQIDAQABAoIBAQCOJA0qql5ZPb8X
sKzMfj7XWB4ff+82gYivQyH1Vq/RQSxqdbgG0G2LeeQiQmGgChvn3nKzn+7BHAeH
vveP1gqOjKjDo9FAYCVb11OE6vB/esXCfVP3sq6v0sWF7zNCdvk4ulgGvNGXVMU8
p1Saq2UZozVQqjYWu6RZCinqArrW6DbCMoGP8nd2zIQLB+73bUmmRwT7zwJjMC3f
bm2ehQTesrtINJPivLqDdBhLj6WAw6TVnxJGTwW9ovlXXUNYY4D1p1xe+JXRWOLp
AwjOD2auqN2XUfF/Wqq2SDC3ExOxozr3aVJkPOZz0f6qWUwP2urja3gbf//h9IG4
NklPNZk1AoGBAO0tFhJssvJ5v4rxRseIg8K72OwRCmxl6PA1f+hS9hGh0yU1jB6o
Tfh/9YLlwK1aZykjch4zwn5RH6qg2fzMfUpiXNZ6p055rZwZMa9QUR4Vw4uE9CFe
RAWNN9czNOUDUjUejo+gIJsTOUE2QcH4wU1NdsPIkTQznf74zUxrknIvAoGBAOd7
B9oX6576+obsr4Srb+KJghQZTnDv6Yzf6MfVqsA6AH4aBJkVbB95B0T2auEk0/zu
xuCRN+lIKNgTcS1ioeBLa6EyJj0lDtDNp0coQuHkfHXUlTZxVbQXAxaro+2HVqnn
P7KdZTaBvjVmw01InURck6jTo76SCqE3hhpZiTEPAoGAWT9O/+3hRHblyc3S2YCY
PC3dia874eBTUBiBohWdvLLNNkI5EMw6UtkrtG7qGxFaolNRPOYzOL8kEFvoy0TT
nKr4zkhnLpOA7a1ZUeQCMjT0WUrVZr0HEaO7MIJ3U/n09EIkWQ1jV5dbQxu8kNYf
Pwx4NWICqccGnAxZjHkqKUcCgYAtB7z+Szr14WMNDkwSpfsM+a0JVebxh2pKkmBJ
R6KA5IemAE+2q/ktojCs1Lbz92MZyCV+GCHlSrlPV3Dj4FUXOfTFFelkaaoZKniJ
GibwpK8liOZCAggnEXbzWQ55raNJSAgXKhE/ajvjTYtepGqbmtrKs+kdsHFK7W8z
/wj2NwKBgAOxCb1bYpMa/X9LLdMD7HkZmXwNgSTrjAHD4FCzYmRNf2KxUZ+PLBhR
iB9FmZLB8dBPG8fdboaZFvubRqGAbUAtPSM0aWV9/Cs4fmngiL4CjS2H8yTlL2D4
HbVYPSl3qhwq4gaRMm5QCM6hQ2UCkRvQllDt8Shziszsz7YPZWM8
-----END RSA PRIVATE KEY-----

1
files/id_rsa.pub Normal file
View File

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDWdarahRhYGRXJCPSLOX/3FS8wEAXiaCKQkZo+ds9TEHJzj/c1vAyKfwEvfNrm/e5XuOozNQ/vwAXeYsjJpBwChJEB+k/iBYnVWKFGElZhBBp1N9s02gekHZ4KdaGhO351AkL+MhXv1II1q595iCJ+0fV/Mtk1QkGZhnTG1H4b5t8Vu787/iopGB/tLeFQHmRr0Kry7fxFmbpPU7mTjDHJe8awjiqV5VAWKg9IT7PRUWCB+fTq57jnnoQAuy6+YF0a+Dw16sbABXRVPXWRIEudw902GfoCoNDHi0VHqc1Oc0RBO4TXWmYUxBX6Z01kdC9ogY33vaiKwqroVejHlq/B

14
files/initialize.sh Normal file
View File

@ -0,0 +1,14 @@
#!/bin/sh
# Distribute the vagrant user's SSH key material to root on every Packstack
# node so the controller can reach all nodes without password prompts.

# Node names from the block between the Packstack markers in /etc/hosts
# (written by files/hosts.j2). Computed once instead of running the same
# sed/awk pipeline twice.
nodes=$(sed -n '/<<< Packstack >>>/{:a;n;/>>> Packstack <<</b;p;ba}' /etc/hosts | awk '{ print $2 }')

# Pre-populate known_hosts so the ssh calls below are non-interactive.
for node in $nodes; do
    ssh-keyscan "$node" >> /home/vagrant/.ssh/known_hosts
done
chown vagrant:vagrant /home/vagrant/.ssh/known_hosts

# Copy the vagrant user's keys to root on every node.
for node in $nodes; do
    ssh "$node" "sudo mkdir -p /root/.ssh"
    ssh "$node" "sudo chmod 700 /root/.ssh"
    ssh "$node" "sudo cp /home/vagrant/.ssh/* /root/.ssh"
    ssh "$node" "sudo chown -R root:root /root/.ssh"
done

6
files/motd Normal file
View File

@ -0,0 +1,6 @@
____ _ ____ _ ______ _____ _ ____ _ __
| _ \ / \ / ___| |/ / ___|_ _|/ \ / ___| |/ /
| |_) / _ \| | | ' /\___ \ | | / _ \| | | ' /
| __/ ___ \ |___| . \ ___) || |/ ___ \ |___| . \
|_| /_/ \_\____|_|\_\____/ |_/_/ \_\____|_|\_\

5
files/openrc.j2 Normal file
View File

@ -0,0 +1,5 @@
export OS_USERNAME=admin
export OS_TENANT_NAME=admin
export OS_PASSWORD=password
export OS_AUTH_URL=http://{{ address.controller }}:5000/v2.0/
export OS_REGION_NAME=RegionOne

673
files/packstack.answers.j2 Normal file
View File

@ -0,0 +1,673 @@
[general]
# Path to a Public key to install on servers. If a usable key has not
# been installed on the remote servers the user will be prompted for a
# password and this key will be installed so the password will not be
# required again
CONFIG_SSH_KEY=/home/vagrant/.ssh/id_rsa.pub
# Set a default password everywhere. The default password will be
# overriden by whatever password is set for each individual service or
# user.
CONFIG_DEFAULT_PASSWORD=password
# Set to 'y' if you would like Packstack to install MariaDB
CONFIG_MARIADB_INSTALL=y
# Set to 'y' if you would like Packstack to install OpenStack Image
# Service (Glance)
CONFIG_GLANCE_INSTALL=y
# Set to 'y' if you would like Packstack to install OpenStack Block
# Storage (Cinder)
CONFIG_CINDER_INSTALL=y
# Set to 'y' if you would like Packstack to install OpenStack Compute
# (Nova)
CONFIG_NOVA_INSTALL=y
# Set to 'y' if you would like Packstack to install OpenStack
# Networking (Neutron). Otherwise Nova Network will be used.
CONFIG_NEUTRON_INSTALL=y
# Set to 'y' if you would like Packstack to install OpenStack
# Dashboard (Horizon)
CONFIG_HORIZON_INSTALL=y
# Set to 'y' if you would like Packstack to install OpenStack Object
# Storage (Swift)
CONFIG_SWIFT_INSTALL=y
# Set to 'y' if you would like Packstack to install OpenStack
# Metering (Ceilometer)
CONFIG_CEILOMETER_INSTALL=y
# Set to 'y' if you would like Packstack to install OpenStack
# Orchestration (Heat)
CONFIG_HEAT_INSTALL=y
# Set to 'y' if you would like Packstack to install the OpenStack
# Client packages. An admin "rc" file will also be installed
CONFIG_CLIENT_INSTALL=y
# Comma separated list of NTP servers. Leave plain if Packstack
# should not install ntpd on instances.
CONFIG_NTP_SERVERS={{ ntp | join(",") }}
# Set to 'y' if you would like Packstack to install Nagios to monitor
# OpenStack hosts
CONFIG_NAGIOS_INSTALL=y
# Comma separated list of servers to be excluded from installation in
# case you are running Packstack the second time with the same answer
# file and don't want Packstack to touch these servers. Leave plain if
# you don't need to exclude any server.
EXCLUDE_SERVERS=
# Set to 'y' if you want to run OpenStack services in debug mode.
# Otherwise set to 'n'.
CONFIG_DEBUG_MODE=y
# The IP address of the server on which to install OpenStack services
# specific to controller role such as API servers, Horizon, etc.
CONFIG_CONTROLLER_HOST={{ address.controller }}
# The list of IP addresses of the server on which to install the Nova
# compute service
CONFIG_COMPUTE_HOSTS={{ address.compute | join(",") }}
# The list of IP addresses of the server on which to install the
# network service such as Nova network or Neutron
CONFIG_NETWORK_HOSTS={{ address.network }}
# Set to 'y' if you want to use VMware vCenter as hypervisor and
# storage. Otherwise set to 'n'.
CONFIG_VMWARE_BACKEND=n
# Set to 'y' if you want to use unsupported parameters. This should
# be used only if you know what you are doing.Issues caused by using
# unsupported options won't be fixed before next major release.
CONFIG_UNSUPPORTED=y
# The IP address of the VMware vCenter server
CONFIG_VCENTER_HOST=
# The username to authenticate to VMware vCenter server
CONFIG_VCENTER_USER=
# The password to authenticate to VMware vCenter server
CONFIG_VCENTER_PASSWORD=
# The name of the vCenter cluster
CONFIG_VCENTER_CLUSTER_NAME=
# (Unsupported!) The IP address of the server on which to install
# OpenStack services specific to storage servers such as Glance and
# Cinder.
CONFIG_STORAGE_HOST={{ address.storage }}
# To subscribe each server to EPEL enter "y"
CONFIG_USE_EPEL=n
# A comma separated list of URLs to any additional yum repositories
# to install
CONFIG_REPO=
# To subscribe each server with Red Hat subscription manager, include
# this with CONFIG_RH_PW
CONFIG_RH_USER=
# To subscribe each server with RHN Satellite,fill Satellite's URL
# here. Note that either satellite's username/password or activation
# key has to be provided
CONFIG_SATELLITE_URL=
# To subscribe each server with Red Hat subscription manager, include
# this with CONFIG_RH_USER
CONFIG_RH_PW=
# To enable RHEL optional repos use value "y"
CONFIG_RH_OPTIONAL=n
# Specify a HTTP proxy to use with Red Hat subscription manager
CONFIG_RH_PROXY=
# Specify port of Red Hat subscription manager HTTP proxy
CONFIG_RH_PROXY_PORT=
# Specify a username to use with Red Hat subscription manager HTTP
# proxy
CONFIG_RH_PROXY_USER=
# Specify a password to use with Red Hat subscription manager HTTP
# proxy
CONFIG_RH_PROXY_PW=
# Username to access RHN Satellite
CONFIG_SATELLITE_USER=
# Password to access RHN Satellite
CONFIG_SATELLITE_PW=
# Activation key for subscription to RHN Satellite
CONFIG_SATELLITE_AKEY=
# Specify a path or URL to a SSL CA certificate to use
CONFIG_SATELLITE_CACERT=
# If required specify the profile name that should be used as an
# identifier for the system in RHN Satellite
CONFIG_SATELLITE_PROFILE=
# Comma separated list of flags passed to rhnreg_ks. Valid flags are:
# novirtinfo, norhnsd, nopackages
CONFIG_SATELLITE_FLAGS=
# Specify a HTTP proxy to use with RHN Satellite
CONFIG_SATELLITE_PROXY=
# Specify a username to use with an authenticated HTTP proxy
CONFIG_SATELLITE_PROXY_USER=
# Specify a password to use with an authenticated HTTP proxy.
CONFIG_SATELLITE_PROXY_PW=
# Set the AMQP service backend. Allowed values are: qpid, rabbitmq
CONFIG_AMQP_BACKEND=rabbitmq
# The IP address of the server on which to install the AMQP service
CONFIG_AMQP_HOST={{ address.shared }}
# Enable SSL for the AMQP service
CONFIG_AMQP_ENABLE_SSL=n
# Enable Authentication for the AMQP service
CONFIG_AMQP_ENABLE_AUTH=n
# The password for the NSS certificate database of the AMQP service
CONFIG_AMQP_NSS_CERTDB_PW=PW_PLACEHOLDER
# The port in which the AMQP service listens to SSL connections
CONFIG_AMQP_SSL_PORT=5671
# The filename of the certificate that the AMQP service is going to
# use
CONFIG_AMQP_SSL_CERT_FILE=/etc/pki/tls/certs/amqp_selfcert.pem
# The filename of the private key that the AMQP service is going to
# use
CONFIG_AMQP_SSL_KEY_FILE=/etc/pki/tls/private/amqp_selfkey.pem
# Auto Generates self signed SSL certificate and key
CONFIG_AMQP_SSL_SELF_SIGNED=y
# User for amqp authentication
CONFIG_AMQP_AUTH_USER=amqp_user
# Password for user authentication
CONFIG_AMQP_AUTH_PASSWORD=password
# The IP address of the server on which to install MariaDB or IP
# address of DB server to use if MariaDB installation was not selected
CONFIG_MARIADB_HOST={{ address.shared }}
# Username for the MariaDB admin user
CONFIG_MARIADB_USER=root
# Password for the MariaDB admin user
CONFIG_MARIADB_PW=password
# The password to use for the Keystone to access DB
CONFIG_KEYSTONE_DB_PW=password
# Region name
CONFIG_KEYSTONE_REGION=RegionOne
# The token to use for the Keystone service api
CONFIG_KEYSTONE_ADMIN_TOKEN=password
# The password to use for the Keystone admin user
CONFIG_KEYSTONE_ADMIN_PW=password
# The password to use for the Keystone demo user
CONFIG_KEYSTONE_DEMO_PW=password
# Kestone token format. Use either UUID or PKI
CONFIG_KEYSTONE_TOKEN_FORMAT=UUID
# Name of service to use to run keystone (keystone or httpd)
CONFIG_KEYSTONE_SERVICE_NAME=keystone
# The password to use for the Glance to access DB
CONFIG_GLANCE_DB_PW=password
# The password to use for the Glance to authenticate with Keystone
CONFIG_GLANCE_KS_PW=password
# Glance storage backend controls how Glance stores disk images.
# Supported values: file, swift. Note that Swift installation have to
# be enabled to have swift backend working. Otherwise Packstack will
# fallback to 'file'.
CONFIG_GLANCE_BACKEND=file
# The password to use for the Cinder to access DB
CONFIG_CINDER_DB_PW=password
# The password to use for the Cinder to authenticate with Keystone
CONFIG_CINDER_KS_PW=password
# The Cinder backend to use, valid options are: lvm, gluster, nfs,
# netapp
CONFIG_CINDER_BACKEND=nfs
# Create Cinder's volumes group. This should only be done for testing
# on a proof-of-concept installation of Cinder. This will create a
# file-backed volume group and is not suitable for production usage.
CONFIG_CINDER_VOLUMES_CREATE=y
# Cinder's volumes group size. Note that actual volume size will be
# extended with 3% more space for VG metadata.
CONFIG_CINDER_VOLUMES_SIZE=20G
# A single or comma separated list of gluster volume shares to mount,
# eg: ip-address:/vol-name, domain:/vol-name
CONFIG_CINDER_GLUSTER_MOUNTS=
# A single or comma seprated list of NFS exports to mount, eg: ip-
# address:/export-name
CONFIG_CINDER_NFS_MOUNTS={{ address.nfs }}:/export
# (required) Administrative user account name used to access the
# storage system or proxy server.
CONFIG_CINDER_NETAPP_LOGIN=
# (required) Password for the administrative user account specified
# in the netapp_login parameter.
CONFIG_CINDER_NETAPP_PASSWORD=
# (required) The hostname (or IP address) for the storage system or
# proxy server.
CONFIG_CINDER_NETAPP_HOSTNAME=
# (optional) The TCP port to use for communication with ONTAPI on the
# storage system. Traditionally, port 80 is used for HTTP and port 443
# is used for HTTPS; however, this value should be changed if an
# alternate port has been configured on the storage system or proxy
# server. Defaults to 80.
CONFIG_CINDER_NETAPP_SERVER_PORT=80
# (optional) The storage family type used on the storage system;
# valid values are ontap_7mode for using Data ONTAP operating in
# 7-Mode or ontap_cluster for using clustered Data ONTAP, or eseries
# for NetApp E-Series. Defaults to ontap_cluster.
CONFIG_CINDER_NETAPP_STORAGE_FAMILY=ontap_cluster
# (optional) The transport protocol used when communicating with
# ONTAPI on the storage system or proxy server. Valid values are http
# or https. Defaults to http.
CONFIG_CINDER_NETAPP_TRANSPORT_TYPE=http
# (optional) The storage protocol to be used on the data path with
# the storage system; valid values are iscsi or nfs. Defaults to nfs.
CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL=nfs
# (optional) The quantity to be multiplied by the requested volume
# size to ensure enough space is available on the virtual storage
# server (Vserver) to fulfill the volume creation request. Defaults
# to 1.0.
CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER=1.0
# (optional) This parameter specifies the threshold for last access
# time for images in the NFS image cache. When a cache cleaning cycle
# begins, images in the cache that have not been accessed in the last
# M minutes, where M is the value of this parameter, will be deleted
# from the cache to create free space on the NFS share. Defaults to
# 720.
CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES=720
# (optional) If the percentage of available space for an NFS share
# has dropped below the value specified by this parameter, the NFS
# image cache will be cleaned. Defaults to 20
CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_START=20
# (optional) When the percentage of available space on an NFS share
# has reached the percentage specified by this parameter, the driver
# will stop clearing files from the NFS image cache that have not been
# accessed in the last M minutes, where M is the value of the
# expiry_thres_minutes parameter. Defaults to 60.
CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_STOP=60
# (optional) File with the list of available NFS shares. Defaults
# to ''.
CONFIG_CINDER_NETAPP_NFS_SHARES_CONFIG=
# (optional) This parameter is only utilized when the storage
# protocol is configured to use iSCSI. This parameter is used to
# restrict provisioning to the specified controller volumes. Specify
# the value of this parameter to be a comma separated list of NetApp
# controller volume names to be used for provisioning. Defaults to
# ''.
CONFIG_CINDER_NETAPP_VOLUME_LIST=
# (optional) The vFiler unit on which provisioning of block storage
# volumes will be done. This parameter is only used by the driver when
# connecting to an instance with a storage family of Data ONTAP
# operating in 7-Mode and the storage protocol selected is iSCSI. Only
# use this parameter when utilizing the MultiStore feature on the
# NetApp storage system. Defaults to ''.
CONFIG_CINDER_NETAPP_VFILER=
# (optional) This parameter specifies the virtual storage server
# (Vserver) name on the storage cluster on which provisioning of block
# storage volumes should occur. If using the NFS storage protocol,
# this parameter is mandatory for storage service catalog support
# (utilized by Cinder volume type extra_specs support). If this
# parameter is specified, the exports belonging to the Vserver will
# only be used for provisioning in the future. Block storage volumes
# on exports not belonging to the Vserver specified by this parameter
# will continue to function normally. Defaults to ''.
CONFIG_CINDER_NETAPP_VSERVER=
# (optional) This option is only utilized when the storage family is
# configured to eseries. This option is used to restrict provisioning
# to the specified controllers. Specify the value of this option to be
# a comma separated list of controller hostnames or IP addresses to be
# used for provisioning. Defaults to ''.
CONFIG_CINDER_NETAPP_CONTROLLER_IPS=
# (optional) Password for the NetApp E-Series storage array. Defaults
# to ''.
CONFIG_CINDER_NETAPP_SA_PASSWORD=
# (optional) This option is used to specify the path to the E-Series
# proxy application on a proxy server. The value is combined with the
# value of the netapp_transport_type, netapp_server_hostname, and
# netapp_server_port options to create the URL used by the driver to
# connect to the proxy application. Defaults to '/devmgr/v2'.
CONFIG_CINDER_NETAPP_WEBSERVICE_PATH=/devmgr/v2
# (optional) This option is used to restrict provisioning to the
# specified storage pools. Only dynamic disk pools are currently
# supported. Specify the value of this option to be a comma separated
# list of disk pool names to be used for provisioning. Defaults to
# ''.
CONFIG_CINDER_NETAPP_STORAGE_POOLS=
# The password to use for the Nova to access DB
CONFIG_NOVA_DB_PW=password
# The password to use for the Nova to authenticate with Keystone
CONFIG_NOVA_KS_PW=password
# The overcommitment ratio for virtual to physical CPUs. Set to 1.0
# to disable CPU overcommitment
CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO=16.0
# The overcommitment ratio for virtual to physical RAM. Set to 1.0 to
# disable RAM overcommitment
CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO=1.5
# Protocol used for instance migration. Allowed values are tcp and
# ssh. Note that by default the nova user is created with a /sbin/nologin
# shell, so the ssh protocol will not work. To make the ssh protocol
# work you have to fix the nova user on compute hosts manually.
CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL=tcp
# Private interface for Flat DHCP on the Nova compute servers
CONFIG_NOVA_COMPUTE_PRIVIF=eth1
# Nova network manager
CONFIG_NOVA_NETWORK_MANAGER=nova.network.manager.FlatDHCPManager
# Public interface on the Nova network server
CONFIG_NOVA_NETWORK_PUBIF=eth0
# Private interface for network manager on the Nova network server
CONFIG_NOVA_NETWORK_PRIVIF=eth1
# IP Range for network manager
CONFIG_NOVA_NETWORK_FIXEDRANGE=192.168.32.0/22
# IP Range for Floating IPs
CONFIG_NOVA_NETWORK_FLOATRANGE=10.3.4.0/22
# Name of the default floating pool to which the specified floating
# ranges are added to
CONFIG_NOVA_NETWORK_DEFAULTFLOATINGPOOL=nova
# Automatically assign a floating IP to new instances
CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP=n
# First VLAN for private networks
CONFIG_NOVA_NETWORK_VLAN_START=100
# Number of networks to support
CONFIG_NOVA_NETWORK_NUMBER=1
# Number of addresses in each private subnet
CONFIG_NOVA_NETWORK_SIZE=255
# The password to use for Neutron to authenticate with Keystone
CONFIG_NEUTRON_KS_PW=password
# The password to use for Neutron to access DB
CONFIG_NEUTRON_DB_PW=password
# The name of the bridge that the Neutron L3 agent will use for
# external traffic, or 'provider' if using provider networks
CONFIG_NEUTRON_L3_EXT_BRIDGE=br-ex
# The name of the L2 plugin to be used with Neutron. (eg.
# linuxbridge, openvswitch, ml2)
CONFIG_NEUTRON_L2_PLUGIN=ml2
# Neutron metadata agent password
CONFIG_NEUTRON_METADATA_PW=password
# Set to 'y' if you would like Packstack to install Neutron LBaaS
CONFIG_LBAAS_INSTALL=y
# Set to 'y' if you would like Packstack to install Neutron L3
# Metering agent
CONFIG_NEUTRON_METERING_AGENT_INSTALL=y
# Whether to configure neutron Firewall as a Service
CONFIG_NEUTRON_FWAAS=y
# A comma separated list of network type driver entrypoints to be
# loaded from the neutron.ml2.type_drivers namespace.
CONFIG_NEUTRON_ML2_TYPE_DRIVERS={{ network_type }},flat
# A comma separated ordered list of network_types to allocate as
# tenant networks. The value 'local' is only useful for single-box
# testing but provides no connectivity between hosts.
CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES={{ network_type }}
# A comma separated ordered list of networking mechanism driver
# entrypoints to be loaded from the neutron.ml2.mechanism_drivers
# namespace.
CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS={{ network_agent }}
# A comma separated list of physical_network names with which flat
# networks can be created. Use * to allow flat networks with arbitrary
# physical_network names.
CONFIG_NEUTRON_ML2_FLAT_NETWORKS=*
# A comma separated list of <physical_network>:<vlan_min>:<vlan_max>
# or <physical_network> specifying physical_network names usable for
# VLAN provider and tenant networks, as well as ranges of VLAN tags on
# each available for allocation to tenant networks.
CONFIG_NEUTRON_ML2_VLAN_RANGES=physnet:1000:2000
# A comma separated list of <tun_min>:<tun_max> tuples enumerating
# ranges of GRE tunnel IDs that are available for tenant network
# allocation. Should be an array with tun_max +1 - tun_min > 1000000
CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES=1000:2000
# Multicast group for VXLAN. When configured, broadcast traffic is sent
# to this multicast group. When left unconfigured, multicast VXLAN mode
# is disabled. Should be a multicast IP (v4 or v6) address.
CONFIG_NEUTRON_ML2_VXLAN_GROUP=
# A comma separated list of <vni_min>:<vni_max> tuples enumerating
# ranges of VXLAN VNI IDs that are available for tenant network
# allocation. Min value is 0 and Max value is 16777215.
CONFIG_NEUTRON_ML2_VNI_RANGES=10:100
# The name of the L2 agent to be used with Neutron
CONFIG_NEUTRON_L2_AGENT={{ network_agent }}
# The type of network to allocate for tenant networks (eg. vlan,
# local)
CONFIG_NEUTRON_LB_TENANT_NETWORK_TYPE=vlan
# A comma separated list of VLAN ranges for the Neutron linuxbridge
# plugin (eg. physnet1:1:4094,physnet2,physnet3:3000:3999)
CONFIG_NEUTRON_LB_VLAN_RANGES=physnet:1000:2000
# A comma separated list of interface mappings for the Neutron
# linuxbridge plugin (eg. physnet1:br-eth1,physnet2:br-eth2,physnet3
# :br-eth3)
CONFIG_NEUTRON_LB_INTERFACE_MAPPINGS=physnet:br-enp0s9
# Type of network to allocate for tenant networks (eg. vlan, local,
# gre, vxlan)
CONFIG_NEUTRON_OVS_TENANT_NETWORK_TYPE={{ network_type }}
# A comma separated list of VLAN ranges for the Neutron openvswitch
# plugin (eg. physnet1:1:4094,physnet2,physnet3:3000:3999)
CONFIG_NEUTRON_OVS_VLAN_RANGES=physnet:1000:2000
# A comma separated list of bridge mappings for the Neutron
# openvswitch plugin (eg. physnet1:br-eth1,physnet2:br-eth2,physnet3
# :br-eth3)
CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS=physnet:br-enp0s9
# A comma separated list of colon-separated OVS bridge:interface
# pairs. The interface will be added to the associated bridge.
CONFIG_NEUTRON_OVS_BRIDGE_IFACES=
# A comma separated list of tunnel ranges for the Neutron openvswitch
# plugin (eg. 1:1000)
CONFIG_NEUTRON_OVS_TUNNEL_RANGES=1000:2000
# The interface for the OVS tunnel. Packstack will override the IP
# address used for tunnels on this hypervisor to the IP found on the
# specified interface. (eg. eth1)
CONFIG_NEUTRON_OVS_TUNNEL_IF=enp0s9
# VXLAN UDP port
CONFIG_NEUTRON_OVS_VXLAN_UDP_PORT=
# To set up Horizon communication over https set this to 'y'
CONFIG_HORIZON_SSL=n
# PEM encoded certificate to be used for ssl on the https server,
# leave blank if one should be generated, this certificate should not
# require a passphrase
CONFIG_SSL_CERT=
# SSL keyfile corresponding to the certificate if one was entered
CONFIG_SSL_KEY=
# PEM encoded CA certificates from which the certificate chain of the
# server certificate can be assembled.
CONFIG_SSL_CACHAIN=
# The password to use for the Swift to authenticate with Keystone
CONFIG_SWIFT_KS_PW=password
# A comma separated list of devices which to use as Swift Storage
# device. Each entry should take the format /path/to/dev, for example
# /dev/vdb will install /dev/vdb as Swift storage device (packstack
# does not create the filesystem, you must do this first). If value is
# omitted Packstack will create a loopback device for test setup
CONFIG_SWIFT_STORAGES=/dev/sdb,/dev/sdc
# Number of swift storage zones, this number MUST be no bigger than
# the number of storage devices configured
CONFIG_SWIFT_STORAGE_ZONES=2
# Number of swift storage replicas, this number MUST be no bigger
# than the number of storage zones configured
CONFIG_SWIFT_STORAGE_REPLICAS=2
# FileSystem type for storage nodes
CONFIG_SWIFT_STORAGE_FSTYPE=xfs
# Shared secret for Swift
CONFIG_SWIFT_HASH=password
# Size of the swift loopback file storage device
CONFIG_SWIFT_STORAGE_SIZE=2G
# Whether to provision for demo usage and testing. Note that
# provisioning is only supported for all-in-one installations.
CONFIG_PROVISION_DEMO=n
# Whether to configure tempest for testing
CONFIG_PROVISION_TEMPEST=y
# The name of the Tempest Provisioning user. If you don't provide a
# user name, Tempest will be configured in a standalone mode
CONFIG_PROVISION_TEMPEST_USER=
# The password to use for the Tempest Provisioning user
CONFIG_PROVISION_TEMPEST_USER_PW=password
# The CIDR network address for the floating IP subnet
CONFIG_PROVISION_DEMO_FLOATRANGE=172.24.4.224/28
# The uri of the tempest git repository to use
CONFIG_PROVISION_TEMPEST_REPO_URI=https://github.com/openstack/tempest.git
# The revision of the tempest git repository to use
CONFIG_PROVISION_TEMPEST_REPO_REVISION=master
# Whether to configure the ovs external bridge in an all-in-one
# deployment
CONFIG_PROVISION_ALL_IN_ONE_OVS_BRIDGE=n
# The password used by Heat user to authenticate against MySQL
CONFIG_HEAT_DB_PW=password
# The encryption key to use for authentication info in database
CONFIG_HEAT_AUTH_ENC_KEY=password
# The password to use for the Heat to authenticate with Keystone
CONFIG_HEAT_KS_PW=password
# Set to 'y' if you would like Packstack to install Heat CloudWatch
# API
CONFIG_HEAT_CLOUDWATCH_INSTALL=y
# Set to 'y' if you would like Packstack to install Heat with trusts
# as deferred auth method. If not, the stored password method will be
# used.
CONFIG_HEAT_USING_TRUSTS=y
# Set to 'y' if you would like Packstack to install Heat
# CloudFormation API
CONFIG_HEAT_CFN_INSTALL=n
# Name of Keystone domain for Heat
CONFIG_HEAT_DOMAIN=heat
# Name of Keystone domain admin user for Heat
CONFIG_HEAT_DOMAIN_ADMIN=heat_admin
# Password for Keystone domain admin user for Heat
CONFIG_HEAT_DOMAIN_PASSWORD=password
# Secret key for signing metering messages
CONFIG_CEILOMETER_SECRET=password
# The password to use for Ceilometer to authenticate with Keystone
CONFIG_CEILOMETER_KS_PW=password
# The IP address of the server on which to install MongoDB
CONFIG_MONGODB_HOST={{ address.shared }}
# The password of the nagiosadmin user on the Nagios server
CONFIG_NAGIOS_PW=password

11
files/selinux Normal file
View File

@ -0,0 +1,11 @@
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=permissive
# SELINUXTYPE= can take one of these two values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted

17
files/setup.sh Normal file
View File

@ -0,0 +1,17 @@
#!/bin/sh

# Populate a fresh OpenStack installation with demo resources: public
# images, default security-group rules, an SSH keypair, and a basic
# tenant/external network topology. Run on the controller node after
# packstack completes; expects admin credentials in /home/vagrant/openrc.
source /home/vagrant/openrc

# Public cloud images; Glance fetches the URLs itself.
# NOTE(review): glanceclient's remote-fetch flag is usually spelled
# --copy-from; confirm --copy is accepted by the installed client.
glance image-create --name "Fedora 21" --disk-format qcow2 --container-format bare --is-public True --copy http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2
glance image-create --name "Ubuntu 14.04" --disk-format qcow2 --container-format bare --is-public True --copy https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
glance image-create --name "CentOS 7" --disk-format qcow2 --container-format bare --is-public True --copy http://cloud.centos.org/centos/7/devel/CentOS-7-x86_64-GenericCloud.qcow2

# Allow SSH and ping in the default security group.
nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0

# NOTE(review): novaclient's flag is usually spelled --pub-key; confirm
# --pub_key works with the installed client version.
nova keypair-add --pub_key /home/vagrant/.ssh/id_rsa.pub default

# Tenant network "internal001" with its own router, plus a shared flat
# external network with a floating-IP allocation pool
# (203.0.113.100-200, gateway 203.0.113.1) and DHCP disabled.
neutron net-create internal001
neutron subnet-create --name internal001 internal001 192.168.200.0/24
neutron router-create internal001
neutron router-interface-add internal001 internal001
neutron net-create external001 --shared --router:external True --provider:physical_network external --provider:network_type flat
neutron subnet-create external001 --name external001 --allocation-pool start=203.0.113.100,end=203.0.113.200 --disable-dhcp --gateway 203.0.113.1 203.0.113.0/24
neutron router-gateway-set internal001 external001

111
library/patch Normal file
View File

@ -0,0 +1,111 @@
#!/usr/bin/python -tt
# (c) 2012, Luis Alberto Perez Lazaro <luisperlazaro@gmail.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible module documentation. Fixed to match the actual argument_spec in
# main(): only patchfile is required, strip defaults to 0, basedir is
# optional. The example description previously said "git checkout"
# (copy-paste from another module).
DOCUMENTATION = '''
---
module: patch
author: Luis Alberto Perez Lazaro
version_added: 0.9
short_description: apply patch files
description:
  - Apply patch files using the GNU patch tool. Before using this module make sure the patch tool is installed.
options:
  patchfile:
    required: true
    description:
      - A patch file as accepted by the gnu patch tool
  strip:
    required: false
    default: 0
    aliases: [ p ]
    description:
      - Number that indicates the smallest prefix containing leading slashes that
        will be stripped from each file name found in the patch file. For more information
        see the strip parameter of the gnu patch tool.
  basedir:
    required: false
    description:
      - base directory in which the patch file will be applied
examples:
  - code: "patch: patchfile=/tmp/critical.patch strip=1 basedir=/usr/share/pyshared/paramiko"
    description: Example patch application from Ansible Playbooks
'''
def _run(args):
cmd = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = cmd.communicate()
rc = cmd.returncode
return (rc, out, err)
def _is_already_applied(patch_file, strip):
    # A reverse dry-run that succeeds means the patch is already in place.
    command = "patch -s -R -N -p%s --dry-run < %s" % (strip, patch_file)
    rc, _, _ = _run(command)
    return rc == 0
def _apply_patch(module, patch_file, strip):
    # Apply the patch non-interactively; on failure report stderr, falling
    # back to stdout when stderr is empty.
    command = "patch -s -N -t -r - -p%s < %s" % (strip, patch_file)
    rc, out, err = _run(command)
    if rc != 0:
        module.fail_json(msg=err if err else out)
def _get_params(module):
patchfile = os.path.expanduser(module.params['patchfile'])
strip = module.params['strip']
basedir = module.params['basedir']
if basedir:
os.chdir(os.path.expanduser(basedir))
if not os.path.exists(patchfile):
module.fail_json(msg="patchfile %s doesn't exist" % (patchfile))
if not os.access(patchfile, os.R_OK):
module.fail_json(msg="patchfile %s not readable" % (patchfile))
if not os.path.exists(basedir):
module.fail_json(msg="basedir %s doesn't exist" % (patchfile))
try:
strip = int(strip)
except Exception:
module.fail_json(msg="p must be a number")
return patchfile, strip, basedir
# ===========================================
def main():
    # Ansible module entry point: declare the accepted parameters, validate
    # them via _get_params, and apply the patch only when it is not already
    # applied (idempotence via a reverse dry-run in _is_already_applied).
    module = AnsibleModule(
        argument_spec = dict(
            patchfile=dict(required=True),
            basedir=dict(),
            strip=dict(default=0, aliases=['p'])
        ),
    )
    patchfile, strip, basedir = _get_params(module)
    changed = False
    if not _is_already_applied(patchfile, strip):
        _apply_patch(module, patchfile, strip)
        changed = True
    module.exit_json(changed=changed)
# include magic from lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()

0
log/.placeholder Normal file
View File

9
patches/mongodb.pp Normal file
View File

@ -0,0 +1,9 @@
--- a/usr/lib/python2.7/site-packages/packstack/puppet/templates/mongodb.pp 2014-12-02 16:19:58.274432117 +0000
+++ b/usr/lib/python2.7/site-packages/packstack/puppet/templates/mongodb.pp 2014-12-02 16:20:15.620432319 +0000
@@ -3,5 +3,5 @@
class { 'mongodb::server':
smallfiles => true,
bind_ip => [$mongodb_host],
+ pidfilepath => '/var/run/mongodb/mongod.pid',
}
-

View File

@ -0,0 +1,11 @@
--- a/usr/lib/python2.7/site-packages/packstack/puppet/templates/nova_compute_libvirt.pp 2014-12-02 16:17:54.839430681 +0000
+++ b/usr/lib/python2.7/site-packages/packstack/puppet/templates/nova_compute_libvirt.pp 2014-12-02 16:18:03.554430782 +0000
@@ -3,7 +3,7 @@
# Ensure Firewall changes happen before libvirt service start
# preventing a clash with rules being set by libvirt
-if $::is_virtual_packstack == 'true' {
+if $::is_virtual == 'true' {
$libvirt_virt_type = 'qemu'
$libvirt_cpu_mode = 'none'
} else {

11
playbook.yaml Normal file
View File

@ -0,0 +1,11 @@
# Single playbook for all nodes: shared initialization everywhere, then a
# role-specific include chosen by the short hostname. Hosts matching none
# of the when clauses (e.g. the compute nodes) run only initialize.yaml.
- hosts: all
  sudo: True
  tasks:
    - include_vars: config.yaml
    - include: ansible/initialize.yaml
    - include: ansible/controller.yaml
      when: inventory_hostname_short == 'controller'
    - include: ansible/network.yaml
      when: inventory_hostname_short == 'network'
    - include: ansible/nfs.yaml
      when: inventory_hostname_short == 'nfs'

37
scripts/bootstrap.sh Executable file
View File

@ -0,0 +1,37 @@
#!/bin/bash

# Build the template box (when missing) and bring up, provision, and
# reload every VM defined in config.yaml. Per-VM vagrant output is
# appended to log/<name>.log.
#
# Fixes vs. the original: every "2>&1 >> file" was reordered to
# ">> file 2>&1" — the original order sent stderr to the terminal's old
# stdout instead of the log file — and the "brining" typo was corrected.

# Run "vagrant <args> <box>" once per host from config.yaml, logging both
# stdout and stderr to log/<box>.log.
run() {
    python scripts/get_hosts.py | xargs -n 1 -P 1 -I BOX sh -c "echo - BOX && (vagrant $* BOX >> log/BOX.log 2>&1)"
}

if [[ ! -e config.yaml ]]; then
    echo "error: configuration file 'config.yaml' does not exist"
    exit 1
fi

echo "$(date) cleaning up"
rm -f log/*
# NOTE(review): destroy without --force prompts for confirmation on
# recent vagrant versions — confirm this is the intended behavior here.
vagrant destroy

# Build and register the packstack-template box only when it is not
# already present in the local box list.
vagrant box list | grep packstack-template > /dev/null
if [[ $? -ne 0 ]]; then
    echo "$(date) preparing template"
    vagrant up template >> log/template.log 2>&1
    vagrant halt template >> log/template.log 2>&1
    vagrant package --output packstack-template.box template >> log/template.log 2>&1
    vagrant destroy --force template >> log/template.log 2>&1
    vagrant box add --force --name packstack-template --provider virtualbox packstack-template.box >> log/template.log 2>&1
    rm -f packstack-template.box
fi

echo "$(date) bringing up all VMs"
run up --no-provision
echo "$(date) provisioning all VMs"
run provision
echo "$(date) reloading all VMs"
run reload
echo "$(date) initializing the controller node"
vagrant ssh controller -c '/home/vagrant/scripts/initialize.sh' >> log/controller.log 2>&1

View File

@ -0,0 +1,7 @@
#!/usr/bin/env python

# Syntax-check a YAML file; exits non-zero (with a short error, no
# traceback) when the file does not parse.

import sys

import yaml

# Suppress the traceback so a syntax error prints only the YAML error.
sys.tracebacklimit = 0

# Fix: the original hard-coded "config.yaml.sample" and ignored the
# filename argument that tox passes (check_yaml_syntax.py <file>). Honor
# the argument, keeping the old file as the default for bare invocation.
filename = sys.argv[1] if len(sys.argv) > 1 else "config.yaml.sample"

# safe_load: this is plain configuration data; yaml.load would allow
# arbitrary Python object construction. The with-block also closes the
# file handle the original leaked.
with open(filename) as handle:
    yaml.safe_load(handle)

12
scripts/get_hosts.py Normal file
View File

@ -0,0 +1,12 @@
#!/usr/bin/python

# Print one Vagrant machine name per line, derived from the address map in
# config.yaml: the 'compute' key holds a list of addresses and expands to
# compute1, compute2, ...; every other key is itself a machine name.

import yaml

# safe_load instead of load: configuration data only, no arbitrary object
# construction. The with-block also closes the handle the original leaked.
with open('config.yaml') as handle:
    config = yaml.safe_load(handle)

for name, value in config['address'].items():
    if name == 'compute':
        for index, _ in enumerate(value):
            print("compute%d" % (index + 1))
    else:
        print(name)

5
scripts/reset.sh Executable file
View File

@ -0,0 +1,5 @@
#!/bin/sh

# Tear down the whole environment: destroy all VMs and remove the cached
# template box so the next bootstrap run rebuilds it from scratch.
vagrant destroy --force
vagrant box remove --force packstack-template
#vagrant box remove --force b1-systems/centos-packstack

21
setup.cfg Normal file
View File

@ -0,0 +1,21 @@
[metadata]
name = packstack-vagrant
summary = Vagrant environment providing a Packstack installation
description-file = README.rst
author = Christian Berendt
author-email = berendt@b1-systems.de
home-page = http://github.com/b1-systems/packstack-vagrant
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
Intended Audience :: System Administrators
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
[build_sphinx]
source-dir = doc/source
build-dir = doc/build
all_files = 1
[upload_sphinx]
upload-dir = doc/build/html

22
setup.py Executable file
View File

@ -0,0 +1,22 @@
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT

# Standard OpenStack pbr bootstrap: all packaging metadata lives in
# setup.cfg and is read by pbr at build time.
import setuptools

setuptools.setup(
    setup_requires=['pbr'],
    pbr=True)

39
tox.ini Normal file
View File

@ -0,0 +1,39 @@
[tox]
minversion = 1.6
envlist = docs,lint
skipsdist = True
[testenv]
usedevelop = False
install_command = pip install {opts} {packages}
[testenv:docs]
deps =
Pygments
docutils
sphinx>=1.1.2,<1.2
pbr>=0.6,!=0.7,<1.0
oslosphinx
commands = python setup.py build_sphinx
[testenv:lint]
whitelist_externals = bash
deps =
ansible
bashate
doc8
flake8
PyYAML
commands =
ansible-playbook --syntax-check playbook.yaml
bash -c "find {toxinidir} \
\( -wholename \*/files/\*.sh -or \
-wholename \*/scripts/\*.sh \) -print0 | \
xargs -0 bashate -v"
doc8 -e rst doc README.rst
flake8
python scripts/check_yaml_syntax.py config.yaml.sample
[flake8]
show-source = True
exclude=.venv,.git,.tox