Retire project

Change-Id: I56340a185c1c3527d53a2050c997a235d7ddf5a7
This commit is contained in:
Filip Pytloun 2017-01-25 18:22:19 +01:00
parent abb96a8da5
commit 0c0a634f16
76 changed files with 7 additions and 17931 deletions

4
.gitignore vendored
View File

@ -1,4 +0,0 @@
tests/build/
*.swp
*.pyc
.ropeproject

View File

@ -1,4 +0,0 @@
[gerrit]
host=review.openstack.org
port=29418
project=openstack/salt-formula-neutron.git

View File

@ -1,10 +0,0 @@
neutron formula
===============
2016.4.1 (2016-04-15)
- second release
0.0.1 (2015-08-03)
- Initial formula setup

View File

@ -1,8 +0,0 @@
name: neutron
os: Debian, RedHat
os_family: Debian, RedHat
version: 201606
release: 1
summary: Formula for installing and configuring neutron
description: Formula for installing and configuring neutron
top_level_dir: neutron

13
LICENSE
View File

@ -1,13 +0,0 @@
Copyright (c) 2014-2015 tcp cloud a. s.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,26 +0,0 @@
# Packaging helper for the salt formula: installs the formula tree and its
# reclass metadata under DESTDIR, and drives the test suite.
DESTDIR=/
SALTENVDIR=/usr/share/salt-formulas/env
RECLASSDIR=/usr/share/salt-formulas/reclass
# Formula name is taken from the first "name:" entry in metadata.yml.
FORMULANAME=$(shell grep name: metadata.yml|head -1|cut -d : -f 2|grep -Eo '[a-z0-9\-]*')

# These targets are commands, not files; declare them phony so a stray
# file named e.g. "test" or "clean" cannot mask them.
.PHONY: all install test clean

all:
	@echo "make install - Install into DESTDIR"
	@echo "make test - Run tests"
	@echo "make clean - Cleanup after tests run"

install:
	# Formula
	[ -d $(DESTDIR)/$(SALTENVDIR) ] || mkdir -p $(DESTDIR)/$(SALTENVDIR)
	cp -a $(FORMULANAME) $(DESTDIR)/$(SALTENVDIR)/
	# _modules and _states are optional; skip silently when absent, but do
	# NOT swallow a real cp failure (the former "|| true" did exactly that).
	[ ! -d _modules ] || cp -a _modules $(DESTDIR)/$(SALTENVDIR)/
	[ ! -d _states ] || cp -a _states $(DESTDIR)/$(SALTENVDIR)/
	# Metadata
	[ -d $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME) ] || mkdir -p $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME)
	cp -a metadata/service/* $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME)

test:
	[ ! -d tests ] || (cd tests; ./run_tests.sh)

clean:
	[ ! -d tests/build ] || rm -rf tests/build
	[ ! -d build ] || rm -rf build

View File

@ -1,625 +1,9 @@
=======================
Neutron Network Service
=======================
Project moved
=============
Neutron is an OpenStack project to provide "networking as a service" between
interface devices (e.g., vNICs) managed by other Openstack services (e.g.,
nova).
This repository as a part of openstack-salt project was moved to join rest of
salt-formulas ecosystem.
Starting in the Folsom release, Neutron is a core and supported part of the
OpenStack platform (for Essex, we were an "incubated" project, which means use
is suggested only for those who really know what they're doing with Neutron).
Sample pillars
==============
Neutron Server on the controller node
.. code-block:: yaml
neutron:
server:
enabled: true
version: mitaka
bind:
address: 172.20.0.1
port: 9696
database:
engine: mysql
host: 127.0.0.1
port: 3306
name: neutron
user: neutron
password: pwd
identity:
engine: keystone
host: 127.0.0.1
port: 35357
user: neutron
password: pwd
tenant: service
message_queue:
engine: rabbitmq
host: 127.0.0.1
port: 5672
user: openstack
password: pwd
virtual_host: '/openstack'
metadata:
host: 127.0.0.1
port: 8775
password: pass
audit:
enabled: false
Neutron VXLAN tenant networks with Network Nodes (with DVR for East-West
and Network node for North-South)
=========================================================================
This use case describes a model utilising VxLAN overlay with DVR. The DVR
routers will only be utilized for traffic that is router within the cloud
infrastructure and that remains encapsulated. External traffic will be
routed to via the network nodes.
The intention is that each tenant will require at least two (2) vrouters
one to be utilised
Neutron Server only
-------------------
.. code-block:: yaml
neutron:
server:
version: mitaka
plugin: ml2
bind:
address: 172.20.0.1
port: 9696
database:
engine: mysql
host: 127.0.0.1
port: 3306
name: neutron
user: neutron
password: pwd
identity:
engine: keystone
host: 127.0.0.1
port: 35357
user: neutron
password: pwd
tenant: service
message_queue:
engine: rabbitmq
host: 127.0.0.1
port: 5672
user: openstack
password: pwd
virtual_host: '/openstack'
global_physnet_mtu: 9000
l3_ha: False # Which type of router will be created by default
dvr: True # disabled for non DVR use case
backend:
engine: ml2
tenant_network_types: "flat,vxlan"
external_mtu: 9000
mechanism:
ovs:
driver: openvswitch
Network Node only
-----------------
.. code-block:: yaml
neutron:
gateway:
enabled: True
version: mitaka
message_queue:
engine: rabbitmq
host: 127.0.0.1
port: 5672
user: openstack
password: pwd
virtual_host: '/openstack'
local_ip: 192.168.20.20 # br-mesh ip address
dvr: True # disabled for non DVR use case
agent_mode: dvr_snat
metadata:
host: 127.0.0.1
password: pass
backend:
engine: ml2
tenant_network_types: "flat,vxlan"
mechanism:
ovs:
driver: openvswitch
Compute Node
-------------
.. code-block:: yaml
neutron:
compute:
enabled: True
version: mitaka
message_queue:
engine: rabbitmq
host: 127.0.0.1
port: 5672
user: openstack
password: pwd
virtual_host: '/openstack'
local_ip: 192.168.20.20 # br-mesh ip address
dvr: True # disabled for non DVR use case
agent_mode: dvr
external_access: false # Compute node with DVR for east-west only, Network Node has True as default
metadata:
host: 127.0.0.1
password: pass
backend:
engine: ml2
tenant_network_types: "flat,vxlan"
mechanism:
ovs:
driver: openvswitch
audit:
enabled: false
Neutron VXLAN tenant networks with Network Nodes (non DVR)
==========================================================
This section describes a network solution that utilises VxLAN overlay
networks without DVR with all routers being managed on the network nodes.
Neutron Server only
-------------------
.. code-block:: yaml
neutron:
server:
version: mitaka
plugin: ml2
bind:
address: 172.20.0.1
port: 9696
database:
engine: mysql
host: 127.0.0.1
port: 3306
name: neutron
user: neutron
password: pwd
identity:
engine: keystone
host: 127.0.0.1
port: 35357
user: neutron
password: pwd
tenant: service
message_queue:
engine: rabbitmq
host: 127.0.0.1
port: 5672
user: openstack
password: pwd
virtual_host: '/openstack'
global_physnet_mtu: 9000
l3_ha: True
dvr: False
backend:
engine: ml2
tenant_network_types: "flat,vxlan"
external_mtu: 9000
mechanism:
ovs:
driver: openvswitch
Network Node only
-----------------
.. code-block:: yaml
neutron:
gateway:
enabled: True
version: mitaka
message_queue:
engine: rabbitmq
host: 127.0.0.1
port: 5672
user: openstack
password: pwd
virtual_host: '/openstack'
local_ip: 192.168.20.20 # br-mesh ip address
dvr: False
agent_mode: legacy
metadata:
host: 127.0.0.1
password: pass
backend:
engine: ml2
tenant_network_types: "flat,vxlan"
mechanism:
ovs:
driver: openvswitch
Compute Node
-------------
.. code-block:: yaml
neutron:
compute:
enabled: True
version: mitaka
message_queue:
engine: rabbitmq
host: 127.0.0.1
port: 5672
user: openstack
password: pwd
virtual_host: '/openstack'
local_ip: 192.168.20.20 # br-mesh ip address
external_access: False
dvr: False
backend:
engine: ml2
tenant_network_types: "flat,vxlan"
mechanism:
ovs:
driver: openvswitch
Neutron VXLAN tenant networks with Network Nodes (with DVR for
East-West and North-South, DVR everywhere, Network node for SNAT)
=================================================================
This section describes a network solution that utilises VxLAN
overlay networks with DVR with North-South and East-West. Network
Node is used only for SNAT.
Neutron Server only
-------------------
.. code-block:: yaml
neutron:
server:
version: mitaka
plugin: ml2
bind:
address: 172.20.0.1
port: 9696
database:
engine: mysql
host: 127.0.0.1
port: 3306
name: neutron
user: neutron
password: pwd
identity:
engine: keystone
host: 127.0.0.1
port: 35357
user: neutron
password: pwd
tenant: service
message_queue:
engine: rabbitmq
host: 127.0.0.1
port: 5672
user: openstack
password: pwd
virtual_host: '/openstack'
global_physnet_mtu: 9000
l3_ha: False
dvr: True
backend:
engine: ml2
tenant_network_types: "flat,vxlan"
external_mtu: 9000
mechanism:
ovs:
driver: openvswitch
Network Node only
-----------------
.. code-block:: yaml
neutron:
gateway:
enabled: True
version: mitaka
message_queue:
engine: rabbitmq
host: 127.0.0.1
port: 5672
user: openstack
password: pwd
virtual_host: '/openstack'
local_ip: 192.168.20.20 # br-mesh ip address
dvr: True
agent_mode: dvr_snat
metadata:
host: 127.0.0.1
password: pass
backend:
engine: ml2
tenant_network_types: "flat,vxlan"
mechanism:
ovs:
driver: openvswitch
Compute Node
-------------
.. code-block:: yaml
neutron:
compute:
enabled: True
version: mitaka
message_queue:
engine: rabbitmq
host: 127.0.0.1
port: 5672
user: openstack
password: pwd
virtual_host: '/openstack'
local_ip: 192.168.20.20 # br-mesh ip address
dvr: True
external_access: True
agent_mode: dvr
metadata:
host: 127.0.0.1
password: pass
backend:
engine: ml2
tenant_network_types: "flat,vxlan"
mechanism:
ovs:
driver: openvswitch
Sample Linux network configuration for DVR
--------------------------------------------
.. code-block:: yaml
linux:
network:
bridge: openvswitch
interface:
eth1:
enabled: true
type: eth
mtu: 9000
proto: manual
eth2:
enabled: true
type: eth
mtu: 9000
proto: manual
eth3:
enabled: true
type: eth
mtu: 9000
proto: manual
br-int:
enabled: true
mtu: 9000
type: ovs_bridge
br-floating:
enabled: true
mtu: 9000
type: ovs_bridge
float-to-ex:
enabled: true
type: ovs_port
mtu: 65000
bridge: br-floating
br-mgmt:
enabled: true
type: bridge
mtu: 9000
address: ${_param:single_address}
netmask: 255.255.255.0
use_interfaces:
- eth1
br-mesh:
enabled: true
type: bridge
mtu: 9000
address: ${_param:tenant_address}
netmask: 255.255.255.0
use_interfaces:
- eth2
br-ex:
enabled: true
type: bridge
mtu: 9000
address: ${_param:external_address}
netmask: 255.255.255.0
use_interfaces:
- eth3
use_ovs_ports:
- float-to-ex
Neutron VLAN tenant networks with Network Nodes
===============================================
VLAN tenant provider
Neutron Server only
-------------------
.. code-block:: yaml
neutron:
server:
version: mitaka
plugin: ml2
...
global_physnet_mtu: 9000
l3_ha: False
dvr: True
backend:
engine: ml2
tenant_network_types: "flat,vlan" # Can be mixed flat,vlan,vxlan
tenant_vlan_range: "1000:2000"
external_vlan_range: "100:200" # Does not have to be defined.
external_mtu: 9000
mechanism:
ovs:
driver: openvswitch
Compute node
-------------------
.. code-block:: yaml
neutron:
compute:
version: mitaka
plugin: ml2
...
dvr: True
agent_mode: dvr
external_access: False
backend:
engine: ml2
tenant_network_types: "flat,vlan" # Can be mixed flat,vlan,vxlan
mechanism:
ovs:
driver: openvswitch
Neutron Server with OpenContrail
==================================
.. code-block:: yaml
neutron:
server:
plugin: contrail
backend:
engine: contrail
host: contrail_discovery_host
port: 8082
user: admin
password: password
tenant: admin
token: token
Neutron Server with Midonet
===========================
.. code-block:: yaml
neutron:
server:
backend:
engine: midonet
host: midonet_api_host
port: 8181
user: admin
password: password
Other
=====
Neutron Keystone region
.. code-block:: yaml
neutron:
server:
enabled: true
version: kilo
...
identity:
region: RegionTwo
...
compute:
region: RegionTwo
...
Client-side RabbitMQ HA setup
.. code-block:: yaml
neutron:
server:
....
message_queue:
engine: rabbitmq
members:
- host: 10.0.16.1
- host: 10.0.16.2
- host: 10.0.16.3
user: openstack
password: pwd
virtual_host: '/openstack'
....
Enable auditing filter, i.e. CADF
.. code-block:: yaml
neutron:
server:
audit:
enabled: true
....
filter_factory: 'keystonemiddleware.audit:filter_factory'
map_file: '/etc/pycadf/neutron_api_audit_map.conf'
....
compute:
audit:
enabled: true
....
filter_factory: 'keystonemiddleware.audit:filter_factory'
map_file: '/etc/pycadf/neutron_api_audit_map.conf'
....
Usage
=====
Fix RDO Neutron installation
.. code-block:: yaml
neutron-db-manage --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini stamp havana
Documentation and Bugs
============================
To learn how to deploy OpenStack Salt, consult the documentation available
online at:
https://wiki.openstack.org/wiki/OpenStackSalt
In the unfortunate event that bugs are discovered, they should be reported to
the appropriate bug tracker. If you obtained the software from a 3rd party
operating system vendor, it is often wise to use their own bug tracker for
reporting problems. In all other cases use the master OpenStack bug tracker,
available at:
http://bugs.launchpad.net/openstack-salt
Developers wishing to work on the OpenStack Salt project should always base
their work on the latest formulas code, available from the master GIT
repository at:
https://git.openstack.org/cgit/openstack/salt-formula-neutron
Developers should also join the discussion on the IRC list, at:
https://wiki.openstack.org/wiki/Meetings/openstack-salt
Github: https://github.com/salt-formulas
Launchpad https://launchpad.net/salt-formulas
IRC: #salt-formulas @ irc.freenode.net

View File

@ -1 +0,0 @@
2016.4.1

View File

@ -1 +0,0 @@
python-yaml

View File

@ -1,73 +0,0 @@
# -*- coding: utf-8 -*-
# Sphinx configuration for the salt-formula-neutron documentation build.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
# Make the repository root importable so sphinx.ext.autodoc can find
# the project sources (conf.py lives two levels below the root).
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'salt-formula-neutron'
copyright = u'2015, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}

View File

@ -1 +0,0 @@
.. include:: ../../README.rst

View File

@ -1,3 +0,0 @@
name: "neutron"
version: "2016.4.1"
source: "https://github.com/openstack/salt-formula-neutron"

View File

@ -1,28 +0,0 @@
applications:
- neutron
classes:
- service.neutron.support
parameters:
neutron:
compute:
enabled: true
version: ${_param:neutron_version}
message_queue:
engine: rabbitmq
host: ${_param:cluster_vip_address}
port: 5672
user: openstack
password: ${_param:rabbitmq_openstack_password}
virtual_host: '/openstack'
local_ip: ${_param:tenant_address}
dvr: false
external_access: false
metadata:
host: ${_param:cluster_vip_address}
password: ${_param:metadata_password}
backend:
engine: ml2
tenant_network_types: "flat,vxlan"
mechanism:
ovs:
driver: openvswitch

View File

@ -1,41 +0,0 @@
applications:
- neutron
classes:
- service.neutron.support
parameters:
neutron:
server:
enabled: true
dns_domain: novalocal
version: ${_param:neutron_version}
bind:
address: ${_param:cluster_local_address}
port: 9696
database:
engine: mysql
host: ${_param:cluster_vip_address}
port: 3306
name: neutron
user: neutron
password: ${_param:mysql_neutron_password}
identity:
engine: keystone
region: RegionOne
host: ${_param:cluster_vip_address}
port: 35357
user: neutron
password: ${_param:keystone_neutron_password}
tenant: service
message_queue:
engine: rabbitmq
host: ${_param:cluster_vip_address}
port: 5672
user: openstack
password: ${_param:rabbitmq_openstack_password}
virtual_host: '/openstack'
compute:
host: ${_param:cluster_vip_address}
region: RegionOne
user: nova
password: ${_param:keystone_nova_password}
tenant: service

View File

@ -1,44 +0,0 @@
parameters:
kubernetes:
control:
configmap:
neutron-server:
grains:
os_family: Debian
pillar:
neutron:
server:
enabled: true
dns_domain: novalocal
version: ${_param:neutron_version}
bind:
address: 0.0.0.0
port: 9696
database:
engine: mysql
host: ${_param:mysql_service_host}
port: 3306
name: neutron
user: neutron
password: ${_param:mysql_neutron_password}
identity:
engine: keystone
region: RegionOne
host: ${_param:keystone_service_host}
port: 35357
user: neutron
password: ${_param:keystone_neutron_password}
tenant: service
message_queue:
engine: rabbitmq
host: ${_param:rabbitmq_service_host}
port: 5672
user: openstack
password: ${_param:rabbitmq_openstack_password}
virtual_host: '/openstack'
compute:
host: ${_param:nova_service_host}
region: RegionOne
user: nova
password: ${_param:keystone_nova_password}
tenant: service

View File

@ -1,44 +0,0 @@
applications:
- neutron
classes:
- service.neutron.support
parameters:
neutron:
server:
enabled: true
plugin: ml2
fwaas: false
dns_domain: novalocal
tunnel_type: vxlan
version: ${_param:neutron_version}
bind:
address: ${_param:single_address}
port: 9696
database:
engine: mysql
host: ${_param:single_address}
port: 3306
name: neutron
user: neutron
password: ${_param:mysql_neutron_password}
identity:
engine: keystone
region: RegionOne
host: ${_param:single_address}
port: 35357
user: neutron
password: ${_param:keystone_neutron_password}
tenant: service
message_queue:
engine: rabbitmq
host: ${_param:single_address}
port: 5672
user: openstack
password: ${_param:rabbitmq_openstack_password}
virtual_host: '/openstack'
compute:
host: ${_param:single_address}
region: RegionOne
user: nova
password: ${_param:keystone_nova_password}
tenant: service

View File

@ -1,28 +0,0 @@
applications:
- neutron
classes:
- service.neutron.support
parameters:
neutron:
gateway:
enabled: true
version: ${_param:neutron_version}
message_queue:
engine: rabbitmq
host: ${_param:cluster_vip_address}
port: 5672
user: openstack
password: ${_param:rabbitmq_openstack_password}
virtual_host: '/openstack'
local_ip: ${_param:tenant_address}
dvr: false
external_access: True
metadata:
host: ${_param:cluster_vip_address}
password: ${_param:metadata_password}
backend:
engine: ml2
tenant_network_types: "flat,vxlan"
mechanism:
ovs:
driver: openvswitch

View File

@ -1,15 +0,0 @@
parameters:
neutron:
_support:
collectd:
enabled: true
heka:
enabled: true
sensu:
enabled: true
sphinx:
enabled: true
config:
enabled: true
grafana:
enabled: true

View File

@ -1,58 +0,0 @@
{% from "neutron/map.jinja" import compute with context %}
{#- Salt states for the Neutron compute role: install the agent packages,
    render the config files for the pillar-selected version, and keep the
    agent services running. Only applied when compute.enabled is set. -#}
{%- if compute.enabled %}
# Install the neutron packages required on a compute node (list comes
# from map.jinja, keyed by OS family).
neutron_compute_packages:
pkg.installed:
- names: {{ compute.pkgs }}
# Main neutron configuration, templated per version and OS family.
/etc/neutron/neutron.conf:
file.managed:
- source: salt://neutron/files/{{ compute.version }}/neutron-generic.conf.{{ grains.os_family }}
- template: jinja
- require:
- pkg: neutron_compute_packages
{% if compute.dvr %}
# DVR mode additionally needs the L3 and metadata agents on the compute
# node itself (east-west routing happens locally).
neutron_dvr_packages:
pkg.installed:
- names:
- neutron-l3-agent
- neutron-metadata-agent
/etc/neutron/l3_agent.ini:
file.managed:
- source: salt://neutron/files/{{ compute.version }}/l3_agent.ini
- template: jinja
- watch_in:
- service: neutron_compute_services
- require:
- pkg: neutron_compute_packages
/etc/neutron/metadata_agent.ini:
file.managed:
- source: salt://neutron/files/{{ compute.version }}/metadata_agent.ini
- template: jinja
- watch_in:
- service: neutron_compute_services
- require:
- pkg: neutron_compute_packages
{% endif %}
# Open vSwitch agent configuration (ML2/OVS backend).
/etc/neutron/plugins/ml2/openvswitch_agent.ini:
file.managed:
- source: salt://neutron/files/{{ compute.version }}/openvswitch_agent.ini
- template: jinja
- require:
- pkg: neutron_compute_packages
# Keep the agent services enabled and restart them when either of the
# watched config files changes.
neutron_compute_services:
service.running:
- names: {{ compute.services }}
- enable: true
- watch:
- file: /etc/neutron/neutron.conf
- file: /etc/neutron/plugins/ml2/openvswitch_agent.ini
{%- endif %}

View File

@ -1,10 +0,0 @@
Import "openstack_neutron"
<Module "openstack_neutron">
KeystoneUrl "{{ plugin.url }}"
Username "{{ plugin.username }}"
Password "{{ plugin.password }}"
Tenant "{{ plugin.tenant }}"
MaxRetries "2"
Timeout "20"
</Module>

View File

@ -1,25 +0,0 @@
{%- from "neutron/map.jinja" import server with context -%}
#!/bin/bash -e
# Container entrypoint template: substitute environment variables into the
# pillar, apply the highstate, then run the requested neutron service in
# the foreground ("server" or "lbaas-agent" as $1).
# envsubst expands ${VAR} placeholders in the pillar with container env vars.
cat /srv/salt/pillar/neutron-server.sls | envsubst > /tmp/neutron-server.sls
mv /tmp/neutron-server.sls /srv/salt/pillar/neutron-server.sls
salt-call --local --retcode-passthrough state.highstate
# The highstate starts services via the init system; stop them so the
# chosen one can be run in the foreground as PID-visible process below.
{% for service in server.services %}
service {{ service }} stop || true
{% endfor %}
if [ "$1" == "server" ]; then
echo "starting neutron-server"
su neutron --shell=/bin/sh -c '/usr/bin/neutron-server --config-file=/etc/neutron/neutron.conf --config-file=/etc/neutron/plugins/opencontrail/ContrailPlugin.ini'
elif [ "$1" == "lbaas-agent" ]; then
echo "starting neutron-lbaas-agent"
su neutron --shell=/bin/sh -c '/usr/bin/neutron-lbaas-agent --config-file=/etc/neutron/neutron.conf'
else
echo "No parameter submitted, don't know what to start" 1>&2
fi
{#-
vim: syntax=jinja
-#}

File diff suppressed because it is too large Load Diff

View File

@ -1,14 +0,0 @@
[logstreamer_neutron]
type = "LogstreamerInput"
log_directory = "/var/log/neutron"
file_match = '(?P<Service>.+)\.log\.?(?P<Index>\d*)?(.gz)?'
differentiator = ['neutron','_','Service']
priority = ["^Index"]
decoder = "openstack"
oldest_duration = "168h"
[openstack]
type = "SandboxDecoder"
filename = "lua_modules/decoders/openstack.lua"
module_directory = "/usr/share/heka/lua_modules;/usr/share/heka/lua_modules/common"

View File

@ -1,15 +0,0 @@
{% from "neutron/map.jinja" import server with context %}
{% from "opencontrail/map.jinja" import config with context %}
[APISERVER]
api_server_ip = {{ config.discovery.host }}
api_server_port = 8082
multi_tenancy = True
contrail_extensions = ipam:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_ipam.NeutronPluginContrailIpam,policy:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_policy.NeutronPluginContrailPolicy,route-table:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_vpc.NeutronPluginContrailVpc,contrail:None
[KEYSTONE]
auth_url = http://{{ config.identity.host }}:35357/v2.0
admin_token = {{ config.identity.token }}
admin_user={{ config.identity.user }}
admin_password={{ config.identity.password }}
admin_tenant_name={{ config.identity.tenant }}

View File

@ -1,39 +0,0 @@
{%- if pillar.neutron.server is defined %}
{%- set neutron = pillar.neutron.server %}
{%- elif pillar.neutron.switch is defined %}
{%- set neutron = pillar.neutron.switch %}
{%- elif pillar.neutron.bridge is defined %}
{%- set neutron = pillar.neutron.bridge %}
{%- endif %}
# Paste deploy pipeline for the Neutron API: version listing at "/" and
# the v2.0 API behind the auth strategy chosen by neutron.auth.
[composite:neutron]
use = egg:Paste#urlmap
/: neutronversions
/v2.0: neutronapi_v2_0
[composite:neutronapi_v2_0]
use = call:neutron.auth:pipeline_factory
noauth = extensions neutronapiapp_v2_0
keystone = authtoken keystonecontext extensions neutronapiapp_v2_0
[filter:keystonecontext]
paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
# Keystone auth_token middleware, pointed at the identity endpoint taken
# from the pillar selected above.
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_port={{ neutron.identity.port }}
auth_protocol=http
auth_url=http://{{ neutron.identity.host }}:{{ neutron.identity.port }}/v2.0
auth_host={{ neutron.identity.host }}
admin_tenant_name = {{ neutron.identity.tenant }}
admin_user = {{ neutron.identity.user }}
admin_password = {{ neutron.identity.password }}
signing_dir = /var/lib/neutron/keystone-signing
[filter:extensions]
paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
[app:neutronversions]
paste.app_factory = neutron.api.versions:Versions.factory
[app:neutronapiapp_v2_0]
paste.app_factory = neutron.api.v2.router:APIRouter.factory

View File

@ -1 +0,0 @@
api-paste.ini.Debian

View File

@ -1,16 +0,0 @@
# Generated by Salt.
{%- from "neutron/map.jinja" import server with context %}
# defaults for neutron-server
# path to config file corresponding to the core_plugin specified in
# neutron.conf
#NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini"
{%- if server.plugin == "ml2" %}
NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/ml2/ml2_conf.ini"
{%- endif %}
{%- if server.plugin == "contrail" %}
NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/opencontrail/ContrailPlugin.ini"
{%- endif %}

View File

@ -1,514 +0,0 @@
{%- set neutron = pillar.neutron.server %}
{%- from "neutron/map.jinja" import server with context %}
[DEFAULT]
# Print more verbose output (set logging level to INFO instead of default WARNING level).
# verbose = False
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
# debug = False
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
state_path = /var/lib/neutron
# Where to store lock files
lock_path = $state_path/lock
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not user_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
# log_dir =
# publish_errors = False
# Address to bind the API server to
# bind_host = 0.0.0.0
bind_host = {{ neutron.bind.address }}
# Port the bind the API server to
# bind_port = 9696
bind_port = {{ neutron.bind.port }}
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
core_plugin = neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2
api_extensions_path = extensions:/usr/lib/python2.7/dist-packages/neutron_plugin_contrail/extensions
# Example: core_plugin = ml2
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
# Paste configuration file
# api_paste_config = api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
# auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4h octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds)
# dhcp_lease_duration = 86400
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
# allow_overlapping_ips = False
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False
# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = neutron.openstack.common.rpc.impl_kombu
# Size of RPC thread pool
# rpc_thread_pool_size = 64
# Size of RPC connection pool
# rpc_conn_pool_size = 30
# Seconds to wait for a response from call or multicall
# rpc_response_timeout = 60
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
# rpc_cast_timeout = 30
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
# control_exchange = neutron
# If passed, use a fake RabbitMQ provider
# fake_rabbit = False
# Configuration options if sending notifications via kombu rpc (these are
# the defaults)
# SSL version to use (valid only if SSL enabled)
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled)
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled)
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled)
# kombu_ssl_ca_certs =
# IP address of the RabbitMQ installation
# rabbit_host = localhost
# Password of the RabbitMQ server
# rabbit_password = guest
# Port where RabbitMQ server is running/listening
# rabbit_port = 5672
# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
# rabbit_hosts = localhost:5672
# User ID used for RabbitMQ connections
# rabbit_userid = guest
# Location of a virtual RabbitMQ installation.
# rabbit_virtual_host = /
# Maximum retries with trying to connect to RabbitMQ
# (the default of 0 implies an infinite retry count)
# rabbit_max_retries = 0
# RabbitMQ connection retry interval
# rabbit_retry_interval = 1
# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
# wipe RabbitMQ database when changing this option. (boolean value)
# rabbit_ha_queues = false
# QPID
# ###rpc_backend = neutron.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
# qpid_hosts = localhost:5672
# Username for qpid connection
# qpid_username = ''
# Password for qpid connection
# qpid_password = ''
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True
# ZMQ
# rpc_backend=neutron.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address.
# rpc_zmq_bind_address = *
# ============ Notification System Options =====================
# Notifications can be sent when network/subnet/port are created, updated or deleted.
# There are three methods of sending notifications: logging (via the
# log_file directive), rpc (via a message queue) and
# noop (no notifications sent, the default)
# Notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = neutron.openstack.common.notifier.no_op_notifier
# Logging driver
# notification_driver = neutron.openstack.common.notifier.log_notifier
# RPC driver.
{%- if server.notification %}
notification_driver = neutron.openstack.common.notifier.rpc_notifier
{%- endif %}
#bind_port = 9697
auth_strategy = keystone
allow_overlapping_ips = True
#rabbit_host = 10.0.102.35
service_plugins = neutron_plugin_contrail.plugins.opencontrail.loadbalancer.plugin.LoadBalancerPlugin
log_format = %(asctime)s.%(msecs)d %(levelname)8s [%(name)s] %(message)s
#rabbit_port = 5673
rabbit_retry_interval = 1
rabbit_retry_backoff = 2
rabbit_max_retries = 0
#rabbit_ha_queues = True
rpc_cast_timeout = 30
rpc_conn_pool_size = 40
rpc_response_timeout = 60
rpc_thread_pool_size = 70
rabbit_host = {{ neutron.message_queue.host }}
rabbit_port = {{ neutron.message_queue.port }}
rabbit_userid = {{ neutron.message_queue.user }}
rabbit_password = {{ neutron.message_queue.password }}
rabbit_virtual_host = {{ neutron.message_queue.virtual_host }}
# default_notification_level is used to form actual topic name(s) or to set logging level
# default_notification_level = INFO
# default_publisher_id is a part of the notification payload
# host = myhost.com
# default_publisher_id = $host
# Defined in rpc_notifier, can be comma separated values.
# The actual topic names will be %s.%(default_notification_level)s
# notification_topics = notifications
# Default maximum number of items returned in a single response,
# value == infinite and value < 0 means no max limit, and value must
# be greater than 0. If the number of items requested is greater than
# pagination_max_limit, server will just return pagination_max_limit
# of number of items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
# agent_down_time = 75
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# neutron server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to neutron server
# router_auto_schedule = True
# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1
# =========== end of items for agent scheduler extension =====
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
# api_workers = 0
# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
# rpc_workers = 0
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
# notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
# notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
# nova_url = http://127.0.0.1:8774
# Name of nova region to use. Useful if keystone manages more than one region
# nova_region_name =
# Username for connection to nova in admin context
# nova_admin_username =
# The uuid of the admin nova tenant
# nova_admin_tenant_id =
# Password for connection to nova in admin context.
# nova_admin_password =
# Authorization URL for connection to nova in admin context.
# nova_admin_auth_url =
# Number of seconds between sending events to nova if there are any events to send
# send_events_interval = 2
# ======== end of neutron nova interactions ==========
[quotas]
quota_driver = neutron_plugin_contrail.plugins.opencontrail.quota.driver.QuotaDriver
# Default driver to use for quota checks
# quota_driver = neutron.db.quota_db.DbQuotaDriver
# Resource name(s) that are supported in quota features
# quota_items = network,subnet,port
# Default number of resource allowed per tenant. A negative value means
# unlimited.
# default_quota = -1
# Number of networks allowed per tenant. A negative value means unlimited.
# quota_network = 10
# Number of subnets allowed per tenant. A negative value means unlimited.
# quota_subnet = 10
# Number of ports allowed per tenant. A negative value means unlimited.
# quota_port = 50
# Number of security groups allowed per tenant. A negative value means
# unlimited.
# quota_security_group = 10
# Number of security group rules allowed per tenant. A negative value means
# unlimited.
# quota_security_group_rule = 100
# Number of vips allowed per tenant. A negative value means unlimited.
# quota_vip = 10
# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10
# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on OpenStack. However, on back-end, a member is a resource consumer
# and that is the reason why quota is possible.
# quota_member = -1
# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on OpenStack. However, on back-end, a member is a resource consumer
# and that is the reason why quota is possible.
# quota_health_monitors = -1
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the command directly
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
# report_interval = 30
# =========== end of items for agent management extension =====
[keystone_authtoken]
#auth_host = 10.0.102.35
#auth_port = 35357
#auth_protocol = http
#admin_tenant_name = service
#admin_user = neutron
#admin_password = ad1b8c6b91b59f556ed2
signing_dir = $state_path/keystone-signing
#admin_token = ad1b8c6b91b59f556ed2
auth_host = {{ neutron.identity.host }}
auth_port = {{ neutron.identity.port }}
auth_protocol = http
admin_tenant_name = {{ neutron.identity.tenant }}
admin_user = {{ neutron.identity.user }}
admin_password = {{ neutron.identity.password }}
auth_uri=http://{{ neutron.identity.host }}:5000/
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
connection = sqlite:////var/lib/neutron/neutron.sqlite
# The SQLAlchemy connection string used to connect to the slave database
# slave_connection =
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
# retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# max_pool_size = 10
# Timeout in seconds before idle sql connections are reaped
# idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
# max_overflow = 20
# Verbosity of SQL debugging information. 0=None, 100=Everything
# connection_debug = 0
# Add python stack traces to SQL as comment strings
# connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
# pool_timeout = 10
[service_providers]
service_provider = LOADBALANCER:Opencontrail:neutron_plugin_contrail.plugins.opencontrail.loadbalancer.driver.OpencontrailLoadbalancerDriver:default
# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default]
# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
# Combination of <service type> and <name> must be unique; <driver> must also be unique
# This is multiline option, example for default provider:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
# In order to activate Radware's lbaas driver you need to uncomment the next line.
# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
# Otherwise comment the HA Proxy line
# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
# uncomment the following line to make the 'netscaler' LBaaS provider available.
# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
[QUOTAS]
quota_network = -1
quota_subnet = -1
quota_port = -1
[NOVA]
vif_types = vrouter

View File

@ -1 +0,0 @@
neutron-server.conf.contrail.Debian

View File

@ -1,13 +0,0 @@
{% from "neutron/map.jinja" import server with context %}
[APISERVER]
api_server_ip = {{ server.backend.host }}
api_server_port = 8082
multi_tenancy = True
contrail_extensions = ipam:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_ipam.NeutronPluginContrailIpam,policy:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_policy.NeutronPluginContrailPolicy,route-table:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_vpc.NeutronPluginContrailVpc,contrail:None
[KEYSTONE]
auth_url = http://{{ server.identity.host }}:35357/v2.0
admin_token = {{ server.backend.token }}
admin_user={{ server.backend.user }}
admin_password={{ server.backend.password }}
admin_tenant_name={{ server.backend.tenant }}

View File

@ -1,45 +0,0 @@
{%- if pillar.neutron.server is defined %}
{%- set neutron = pillar.neutron.server %}
{%- elif pillar.neutron.switch is defined %}
{%- set neutron = pillar.neutron.switch %}
{%- elif pillar.neutron.bridge is defined %}
{%- set neutron = pillar.neutron.bridge %}
{%- endif %}
[composite:neutron]
use = egg:Paste#urlmap
/: neutronversions
/v2.0: neutronapi_v2_0
[composite:neutronapi_v2_0]
use = call:neutron.auth:pipeline_factory
noauth = request_id catch_errors extensions neutronapiapp_v2_0
keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0
[filter:request_id]
paste.filter_factory = oslo.middleware:RequestId.factory
[filter:catch_errors]
paste.filter_factory = oslo.middleware:CatchErrors.factory
[filter:keystonecontext]
paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
auth_port={{ neutron.identity.port }}
auth_protocol=http
auth_url=http://{{ neutron.identity.host }}:{{ neutron.identity.port }}/v2.0
auth_host={{ neutron.identity.host }}
admin_tenant_name = {{ neutron.identity.tenant }}
admin_user = {{ neutron.identity.user }}
admin_password = {{ neutron.identity.password }}
signing_dir = /var/lib/neutron/keystone-signing
[filter:extensions]
paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
[app:neutronversions]
paste.app_factory = neutron.api.versions:Versions.factory
[app:neutronapiapp_v2_0]
paste.app_factory = neutron.api.v2.router:APIRouter.factory

View File

@ -1 +0,0 @@
api-paste.ini.Debian

View File

@ -1,12 +0,0 @@
{% from "neutron/map.jinja" import server with context %}
[DATABASE]
sql_connection = {{ server.database.engine }}://{{ server.database.user }}:{{ server.database.password }}@{{ server.database.host }}/{{ server.database.name }}
[MIDONET]
# MidoNet API URL
midonet_uri = http://{{ server.backend.host }}:8181/midonet-api
# MidoNet administrative user in Keystone
username = {{ server.backend.user }}
password = {{ server.backend.password }}
# MidoNet administrative user's tenant
project_id = service

View File

@ -1,20 +0,0 @@
# Generated by Salt.
{%- from "neutron/map.jinja" import server with context %}
# defaults for neutron-server
# path to config file corresponding to the core_plugin specified in
# neutron.conf
#NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini"
{%- if server.backend.engine == "ml2" %}
NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/ml2/ml2_conf.ini"
{%- endif %}
{%- if server.backend.engine == "contrail" %}
NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/opencontrail/ContrailPlugin.ini"
{%- endif %}
{%- if server.backend.engine == "midonet" %}
NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/midonet/midonet.ini"
{%- endif %}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,13 +0,0 @@
{% from "neutron/map.jinja" import server with context %}
[APISERVER]
api_server_ip = {{ server.backend.host }}
api_server_port = 8082
multi_tenancy = True
contrail_extensions = ipam:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_ipam.NeutronPluginContrailIpam,policy:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_policy.NeutronPluginContrailPolicy,route-table:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_vpc.NeutronPluginContrailVpc,contrail:None
[KEYSTONE]
auth_url = http://{{ server.identity.host }}:35357/v2.0
admin_token = {{ server.backend.token }}
admin_user={{ server.backend.user }}
admin_password={{ server.backend.password }}
admin_tenant_name={{ server.backend.tenant }}

View File

@ -1,51 +0,0 @@
{%- from "neutron/map.jinja" import server with context %}
{%- if pillar.neutron.server is defined %}
{%- set neutron = pillar.neutron.server %}
{%- elif pillar.neutron.switch is defined %}
{%- set neutron = pillar.neutron.switch %}
{%- elif pillar.neutron.bridge is defined %}
{%- set neutron = pillar.neutron.bridge %}
{%- endif %}
[composite:neutron]
use = egg:Paste#urlmap
/: neutronversions
/v2.0: neutronapi_v2_0
[composite:neutronapi_v2_0]
use = call:neutron.auth:pipeline_factory
noauth = request_id catch_errors extensions neutronapiapp_v2_0
keystone = request_id catch_errors authtoken keystonecontext extensions {% if server.audit.enabled %}audit {% endif %}neutronapiapp_v2_0
[filter:request_id]
paste.filter_factory = oslo_middleware:RequestId.factory
[filter:catch_errors]
paste.filter_factory = oslo_middleware:CatchErrors.factory
[filter:keystonecontext]
paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
auth_port={{ neutron.identity.port }}
auth_protocol=http
auth_url=http://{{ neutron.identity.host }}:{{ neutron.identity.port }}/v2.0
auth_host={{ neutron.identity.host }}
admin_tenant_name = {{ neutron.identity.tenant }}
admin_user = {{ neutron.identity.user }}
admin_password = {{ neutron.identity.password }}
[filter:extensions]
paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
[app:neutronversions]
paste.app_factory = neutron.api.versions:Versions.factory
[app:neutronapiapp_v2_0]
paste.app_factory = neutron.api.v2.router:APIRouter.factory
{%- if server.audit.enabled %}
[filter:audit]
paste.filter_factory = {{ server.get("audit", {}).get("filter_factory", "keystonemiddleware.audit:filter_factory") }}
audit_map_file = {{ server.get("audit", {}).get("map_file", "/etc/pycadf/neutron_api_audit_map.conf") }}
{%- endif %}

View File

@ -1 +0,0 @@
api-paste.ini.Debian

View File

@ -1,12 +0,0 @@
{% from "neutron/map.jinja" import server with context %}
[DATABASE]
sql_connection = {{ server.database.engine }}://{{ server.database.user }}:{{ server.database.password }}@{{ server.database.host }}/{{ server.database.name }}
[MIDONET]
# MidoNet API URL
midonet_uri = http://{{ server.backend.host }}:8181/midonet-api
# MidoNet administrative user in Keystone
username = {{ server.backend.user }}
password = {{ server.backend.password }}
# MidoNet administrative user's tenant
project_id = service

View File

@ -1,20 +0,0 @@
# Generated by Salt.
{%- from "neutron/map.jinja" import server with context %}
# defaults for neutron-server
# path to config file corresponding to the core_plugin specified in
# neutron.conf
#NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini"
{%- if server.backend.engine == "ml2" %}
NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/ml2/ml2_conf.ini"
{%- endif %}
{%- if server.backend.engine == "contrail" %}
NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/opencontrail/ContrailPlugin.ini"
{%- endif %}
{%- if server.backend.engine == "midonet" %}
NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/midonet/midonet.ini"
{%- endif %}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,13 +0,0 @@
{% from "neutron/map.jinja" import server with context %}
[APISERVER]
api_server_ip = {{ server.backend.host }}
api_server_port = 8082
multi_tenancy = True
contrail_extensions = ipam:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_ipam.NeutronPluginContrailIpam,policy:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_policy.NeutronPluginContrailPolicy,route-table:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_vpc.NeutronPluginContrailVpc,contrail:None
[KEYSTONE]
auth_url = http://{{ server.identity.host }}:35357/v2.0
admin_token = {{ server.backend.token }}
admin_user={{ server.backend.user }}
admin_password={{ server.backend.password }}
admin_tenant_name={{ server.backend.tenant }}

View File

@ -1,41 +0,0 @@
{%- from "neutron/map.jinja" import server with context %}
[composite:neutron]
use = egg:Paste#urlmap
/: neutronversions
/v2.0: neutronapi_v2_0
[composite:neutronapi_v2_0]
use = call:neutron.auth:pipeline_factory
noauth = cors request_id catch_errors extensions neutronapiapp_v2_0
keystone = cors request_id catch_errors authtoken keystonecontext extensions {% if server.audit.enabled %}audit {% endif %}neutronapiapp_v2_0
[filter:request_id]
paste.filter_factory = oslo_middleware:RequestId.factory
[filter:catch_errors]
paste.filter_factory = oslo_middleware:CatchErrors.factory
[filter:cors]
paste.filter_factory = oslo_middleware.cors:filter_factory
oslo_config_project = neutron
[filter:keystonecontext]
paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
[filter:extensions]
paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
[app:neutronversions]
paste.app_factory = neutron.api.versions:Versions.factory
[app:neutronapiapp_v2_0]
paste.app_factory = neutron.api.v2.router:APIRouter.factory
{%- if server.audit.enabled %}
[filter:audit]
paste.filter_factory = {{ server.get("audit", {}).get("filter_factory", "keystonemiddleware.audit:filter_factory") }}
audit_map_file = {{ server.get("audit", {}).get("map_file", "/etc/pycadf/neutron_api_audit_map.conf") }}
{%- endif %}

View File

@ -1 +0,0 @@
api-paste.ini.Debian

View File

@ -1,184 +0,0 @@
[DEFAULT]
#
# From neutron.base.agent
#
# Name of Open vSwitch bridge to use (string value)
#ovs_integration_bridge = br-int
# Uses veth for an OVS interface or not. Support kernels with limited namespace support (e.g. RHEL 6.5) so long as ovs_use_veth is set to
# True. (boolean value)
#ovs_use_veth = false
# MTU setting for device. This option will be removed in Newton. Please use the system-wide global_physnet_mtu setting which the agents will
# take into account when wiring VIFs. (integer value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#network_device_mtu = <None>
# The driver used to manage the virtual interface. (string value)
#interface_driver = <None>
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs commands will fail with ALARMCLOCK error. (integer value)
#ovs_vsctl_timeout = 10
#
# From neutron.dhcp.agent
#
# The DHCP agent will resync its state with Neutron to recover from any transient notification or RPC errors. The interval is number of
# seconds between attempts. (integer value)
#resync_interval = 5
resync_interval = 30
# The driver used to manage the DHCP server. (string value)
#dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
# The DHCP server can assist with providing metadata support on isolated networks. Setting this value to True will cause the DHCP server to
# append specific host routes to the DHCP request. The metadata service will only be activated when the subnet does not contain any router
# port. The guest instance must be configured to request host routes via DHCP (Option 121). This option doesn't have any effect when
# force_metadata is set to True. (boolean value)
#enable_isolated_metadata = false
enable_isolated_metadata = True
# In some cases the Neutron router is not present to provide the metadata IP but the DHCP server can be used to provide this info. Setting
# this value will force the DHCP server to append specific host routes to the DHCP request. If this option is set, then the metadata service
# will be activated for all the networks. (boolean value)
#force_metadata = false
# Allows for serving metadata requests coming from a dedicated metadata access network whose CIDR is 169.254.169.254/16 (or larger prefix),
# and is connected to a Neutron router from which the VMs send metadata requests. In this case DHCP Option 121 will not be injected in VMs,
# as they will be able to reach 169.254.169.254 through a router. This option requires enable_isolated_metadata = True. (boolean value)
#enable_metadata_network = false
enable_metadata_network = False
# Number of threads to use during sync process. Should not exceed connection pool size configured on server. (integer value)
#num_sync_threads = 4
# Location to store DHCP server config files. (string value)
#dhcp_confs = $state_path/dhcp
# Domain to use for building the hostnames. This option is deprecated. It has been moved to neutron.conf as dns_domain. It will be removed
# in a future release. (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#dhcp_domain = openstacklocal
# Override the default dnsmasq settings with this file. (string value)
#dnsmasq_config_file =
# Comma-separated list of the DNS servers which will be used as forwarders. (list value)
# Deprecated group/name - [DEFAULT]/dnsmasq_dns_server
#dnsmasq_dns_servers = <None>
# Base log dir for dnsmasq logging. The log contains DHCP and DNS log information and is useful for debugging issues with either DHCP or
# DNS. If this section is null, disable dnsmasq log. (string value)
#dnsmasq_base_log_dir = <None>
# Enables the dnsmasq service to provide name resolution for instances via DNS resolvers on the host running the DHCP agent. Effectively
# removes the '--no-resolv' option from the dnsmasq process arguments. Adding custom DNS resolvers to the 'dnsmasq_dns_servers' option
# disables this feature. (boolean value)
#dnsmasq_local_resolv = false
# Limit number of leases to prevent a denial-of-service. (integer value)
#dnsmasq_lease_max = 16777216
# Use broadcast in DHCP replies. (boolean value)
#dhcp_broadcast_reply = false
#
# From oslo.log
#
# If set to true, the logging level will be set to DEBUG instead of the default INFO level. (boolean value)
#debug = false
debug = False
# If set to false, the logging level will be set to WARNING instead of the default INFO level. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#verbose = true
# The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging
# configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging
# configuration is set in the configuration file and other logging configuration options are ignored (for example,
# logging_context_format_string). (string value)
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>
# Defines the format string for %%(asctime)s in log records. Default: %(default)s . This option is ignored if log_config_append is set.
# (string value)
#log_date_format = %Y-%m-%d %H:%M:%S
# (Optional) Name of log file to send logging output to. If no default is set, logging will go to stderr as defined by use_stderr. This
# option is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>
# Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified
# path instantaneously. It makes sense only if log_file option is specified and Linux platform is used. This option is ignored if
# log_config_append is set. (boolean value)
#watch_log_file = false
# Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if
# log_config_append is set. (boolean value)
#use_syslog = false
# Syslog facility to receive log lines. This option is ignored if log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
# Log output to standard error. This option is ignored if log_config_append is set. (boolean value)
#use_stderr = true
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages when context is undefined. (string value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Additional data to append to log message when logging level for the message is DEBUG. (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format. (string value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
# Defines the format string for %(user_identity)s that is used in logging_context_format_string. (string value)
#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
# List of package logging levels in logger=LEVEL pairs. This option is ignored if log_config_append is set. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
# The format for an instance that is passed with the log message. (string value)
#instance_format = "[instance: %(uuid)s] "
# The format for an instance UUID that is passed with the log message. (string value)
#instance_uuid_format = "[instance: %(uuid)s] "
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
root_helper=sudo neutron-rootwrap /etc/neutron/rootwrap.conf
state_path=/var/lib/neutron
[AGENT]
#
# From neutron.base.agent
#
# Seconds between nodes reporting state to server; should be less than agent_down_time, best if it is half or less than agent_down_time.
# (floating point value)
#report_interval = 30
# Log agent heartbeats (boolean value)
#log_agent_heartbeats = false

View File

@ -1,230 +0,0 @@
{%- if pillar.neutron.gateway is defined %}
{%- from "neutron/map.jinja" import gateway as neutron with context %}
{%- else %}
{%- from "neutron/map.jinja" import compute as neutron with context %}
{%- endif %}
[DEFAULT]
#
# From neutron.base.agent
#
# Name of Open vSwitch bridge to use (string value)
#ovs_integration_bridge = br-int
# Uses veth for an OVS interface or not. Support kernels with limited namespace support (e.g. RHEL 6.5) so long as ovs_use_veth is set to
# True. (boolean value)
#ovs_use_veth = false
# MTU setting for device. This option will be removed in Newton. Please use the system-wide global_physnet_mtu setting which the agents will
# take into account when wiring VIFs. (integer value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#network_device_mtu = <None>
# The driver used to manage the virtual interface. (string value)
#interface_driver = <None>
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs commands will fail with ALARMCLOCK error. (integer value)
#ovs_vsctl_timeout = 10
#
# From neutron.l3.agent
#
# The working mode for the agent. Allowed modes are: 'legacy' - this preserves the existing behavior where the L3 agent is deployed on a
# centralized networking node to provide L3 services like DNAT, and SNAT. Use this mode if you do not want to adopt DVR. 'dvr' - this mode
# enables DVR functionality and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - this enables centralized SNAT support
# in conjunction with DVR. This mode must be used for an L3 agent running on a centralized node (or in single-host deployments, e.g.
# devstack) (string value)
# Allowed values: dvr, dvr_snat, legacy
#agent_mode = legacy
agent_mode = {{ neutron.agent_mode }}
# TCP Port used by Neutron metadata namespace proxy. (port value)
# Minimum value: 0
# Maximum value: 65535
#metadata_port = 9697
metadata_port = 8775
# Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the feature is disabled (integer value)
#send_arp_for_ha = 3
# If non-empty, the l3 agent can only configure a router that has the matching router ID. (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#router_id =
# Indicates that this L3 agent should also handle routers that do not have an external network gateway configured. This option should be
# True only for a single agent in a Neutron deployment, and may be False for all agents if all routers must have an external network
# gateway. (boolean value)
#handle_internal_only_routers = true
# When external_network_bridge is set, each L3 agent can be associated with no more than one external network. This value should be set to
# the UUID of that external network. To allow the L3 agent to support multiple external networks, both the external_network_bridge and
# gateway_external_network_id must be left empty. (string value)
#gateway_external_network_id =
# With IPv6, the network used for the external gateway does not need to have an associated subnet, since the automatically assigned link-
# local address (LLA) can be used. However, an IPv6 gateway address is needed for use as the next-hop for the default route. If no IPv6
# gateway address is configured here, (and only then) the neutron router will be configured to get its default route from router
# advertisements (RAs) from the upstream router; in which case the upstream router must also be configured to send these RAs. The
# ipv6_gateway, when configured, should be the LLA of the interface on the upstream router. If a next-hop using a global unique address
# (GUA) is desired, it needs to be done via a subnet allocated to the network and not through this parameter. (string value)
#ipv6_gateway =
# Driver used for ipv6 prefix delegation. This needs to be an entry point defined in the neutron.agent.linux.pd_drivers namespace. See
# setup.cfg for entry points included with the neutron source. (string value)
#prefix_delegation_driver = dibbler
# Allow running metadata proxy. (boolean value)
#enable_metadata_proxy = true
# Iptables mangle mark used to mark valid metadata requests. This mark will be masked with 0xffff so that only the lower 16 bits will be
# used. (string value)
#metadata_access_mark = 0x1
# Iptables mangle mark used to mark ingress from external network. This mark will be masked with 0xffff so that only the lower 16 bits will
# be used. (string value)
#external_ingress_mark = 0x2
# Name of bridge used for external network traffic. This should be set to an empty value for the Linux Bridge. When this parameter is set,
# each L3 agent can be associated with no more than one external network. (string value)
#external_network_bridge = br-ex
external_network_bridge =
# Seconds between running periodic tasks (integer value)
#periodic_interval = 40
# Number of separate API worker processes for service. If not specified, the default is equal to the number of CPUs available for best
# performance. (integer value)
#api_workers = <None>
# Number of RPC worker processes for service (integer value)
#rpc_workers = 1
# Number of RPC worker processes dedicated to state reports queue (integer value)
#rpc_state_report_workers = 1
# Range of seconds to randomly delay when starting the periodic task scheduler to reduce stampeding. (Disable by setting to 0) (integer
# value)
#periodic_fuzzy_delay = 5
# Location to store keepalived/conntrackd config files (string value)
#ha_confs_path = $state_path/ha_confs
# VRRP authentication type (string value)
# Allowed values: AH, PASS
#ha_vrrp_auth_type = PASS
# VRRP authentication password (string value)
#ha_vrrp_auth_password = <None>
# The advertisement interval in seconds (integer value)
#ha_vrrp_advert_int = 2
# Service to handle DHCPv6 Prefix delegation. (string value)
#pd_dhcp_driver = dibbler
# Location to store IPv6 RA config files (string value)
#ra_confs = $state_path/ra
# MinRtrAdvInterval setting for radvd.conf (integer value)
#min_rtr_adv_interval = 30
# MaxRtrAdvInterval setting for radvd.conf (integer value)
#max_rtr_adv_interval = 100
#
# From oslo.log
#
# If set to true, the logging level will be set to DEBUG instead of the default INFO level. (boolean value)
#debug = false
debug = False
# If set to false, the logging level will be set to WARNING instead of the default INFO level. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#verbose = true
# The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging
# configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging
# configuration is set in the configuration file and other logging configuration options are ignored (for example,
# logging_context_format_string). (string value)
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>
# Defines the format string for %%(asctime)s in log records. Default: %(default)s . This option is ignored if log_config_append is set.
# (string value)
#log_date_format = %Y-%m-%d %H:%M:%S
# (Optional) Name of log file to send logging output to. If no default is set, logging will go to stderr as defined by use_stderr. This
# option is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>
# Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified
# path instantaneously. It makes sense only if log_file option is specified and Linux platform is used. This option is ignored if
# log_config_append is set. (boolean value)
#watch_log_file = false
# Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if
# log_config_append is set. (boolean value)
#use_syslog = false
# Syslog facility to receive log lines. This option is ignored if log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
# Log output to standard error. This option is ignored if log_config_append is set. (boolean value)
#use_stderr = true
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages when context is undefined. (string value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Additional data to append to log message when logging level for the message is DEBUG. (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format. (string value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
# Defines the format string for %(user_identity)s that is used in logging_context_format_string. (string value)
#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
# List of package logging levels in logger=LEVEL pairs. This option is ignored if log_config_append is set. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
# The format for an instance that is passed with the log message. (string value)
#instance_format = "[instance: %(uuid)s] "
# The format for an instance UUID that is passed with the log message. (string value)
#instance_uuid_format = "[instance: %(uuid)s] "
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
[AGENT]
#
# From neutron.base.agent
#
# Seconds between nodes reporting state to server; should be less than agent_down_time, best if it is half or less than agent_down_time.
# (floating point value)
#report_interval = 30
# Log agent heartbeats (boolean value)
#log_agent_heartbeats = false

View File

@ -1,158 +0,0 @@
{%- if pillar.neutron.gateway is defined %}
{%- from "neutron/map.jinja" import gateway as neutron with context %}
{%- else %}
{%- from "neutron/map.jinja" import compute as neutron with context %}
{%- endif %}
[DEFAULT]
#
# From neutron.metadata.agent
#
# Location for Metadata Proxy UNIX domain socket. (string value)
#metadata_proxy_socket = $state_path/metadata_proxy
# User (uid or name) running metadata proxy after its initialization (if empty: agent effective user). (string value)
#metadata_proxy_user =
# Group (gid or name) running metadata proxy after its initialization (if empty: agent effective group). (string value)
#metadata_proxy_group =
# Certificate Authority public key (CA cert) file for ssl (string value)
#auth_ca_cert = <None>
# IP address used by Nova metadata server. (string value)
#nova_metadata_ip = 127.0.0.1
nova_metadata_ip = {{ neutron.metadata.host }}
# TCP Port used by Nova metadata server. (port value)
# Minimum value: 0
# Maximum value: 65535
#nova_metadata_port = 8775
# When proxying metadata requests, Neutron signs the Instance-ID header with a shared secret to prevent spoofing. You may select any string
# for a secret, but it must match here and in the configuration used by the Nova Metadata Server. NOTE: Nova uses the same config key, but
# in [neutron] section. (string value)
metadata_proxy_shared_secret = {{ neutron.metadata.password }}
# Protocol to access nova metadata, http or https (string value)
# Allowed values: http, https
#nova_metadata_protocol = http
nova_metadata_protocol = http
# Allow to perform insecure SSL (https) requests to nova metadata (boolean value)
#nova_metadata_insecure = false
# Client certificate for nova metadata api server. (string value)
#nova_client_cert =
# Private key of client certificate. (string value)
#nova_client_priv_key =
# Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce mode from metadata_proxy_user/group values, 'user': set
# metadata proxy socket mode to 0o644, to use when metadata_proxy_user is agent effective user or root, 'group': set metadata proxy socket
# mode to 0o664, to use when metadata_proxy_group is agent effective group or root, 'all': set metadata proxy socket mode to 0o666, to use
# otherwise. (string value)
# Allowed values: deduce, user, group, all
#metadata_proxy_socket_mode = deduce
# Number of separate worker processes for metadata server (defaults to half of the number of CPUs) (integer value)
#metadata_workers = 4
# Number of backlog requests to configure the metadata server socket with (integer value)
#metadata_backlog = 4096
# URL to connect to the cache back end. (string value)
#cache_url = memory://
#
# From oslo.log
#
# If set to true, the logging level will be set to DEBUG instead of the default INFO level. (boolean value)
#debug = false
debug = False
# If set to false, the logging level will be set to WARNING instead of the default INFO level. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#verbose = true
# The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging
# configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging
# configuration is set in the configuration file and other logging configuration options are ignored (for example,
# logging_context_format_string). (string value)
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>
# Defines the format string for %%(asctime)s in log records. Default: %(default)s . This option is ignored if log_config_append is set.
# (string value)
#log_date_format = %Y-%m-%d %H:%M:%S
# (Optional) Name of log file to send logging output to. If no default is set, logging will go to stderr as defined by use_stderr. This
# option is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>
# Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified
# path instantaneously. It makes sense only if log_file option is specified and Linux platform is used. This option is ignored if
# log_config_append is set. (boolean value)
#watch_log_file = false
# Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if
# log_config_append is set. (boolean value)
#use_syslog = false
# Syslog facility to receive log lines. This option is ignored if log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
# Log output to standard error. This option is ignored if log_config_append is set. (boolean value)
#use_stderr = true
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages when context is undefined. (string value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Additional data to append to log message when logging level for the message is DEBUG. (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format. (string value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
# Defines the format string for %(user_identity)s that is used in logging_context_format_string. (string value)
#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
# List of package logging levels in logger=LEVEL pairs. This option is ignored if log_config_append is set. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
# The format for an instance that is passed with the log message. (string value)
#instance_format = "[instance: %(uuid)s] "
# The format for an instance UUID that is passed with the log message. (string value)
#instance_uuid_format = "[instance: %(uuid)s] "
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
[AGENT]
#
# From neutron.metadata.agent
#
# Seconds between nodes reporting state to server; should be less than agent_down_time, best if it is half or less than agent_down_time.
# (floating point value)
#report_interval = 30
# Log agent heartbeats (boolean value)
#log_agent_heartbeats = false

View File

@ -1,208 +0,0 @@
{%- from "neutron/map.jinja" import server with context %}
[DEFAULT]
#
# From oslo.log
#
# If set to true, the logging level will be set to DEBUG instead of the default INFO level. (boolean value)
#debug = false
# If set to false, the logging level will be set to WARNING instead of the default INFO level. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#verbose = true
# The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging
# configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging
# configuration is set in the configuration file and other logging configuration options are ignored (for example,
# logging_context_format_string). (string value)
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>
# Defines the format string for %%(asctime)s in log records. Default: %(default)s . This option is ignored if log_config_append is set.
# (string value)
#log_date_format = %Y-%m-%d %H:%M:%S
# (Optional) Name of log file to send logging output to. If no default is set, logging will go to stderr as defined by use_stderr. This
# option is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>
# Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified
# path instantaneously. It makes sense only if log_file option is specified and Linux platform is used. This option is ignored if
# log_config_append is set. (boolean value)
#watch_log_file = false
# Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if
# log_config_append is set. (boolean value)
#use_syslog = false
# Syslog facility to receive log lines. This option is ignored if log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
# Log output to standard error. This option is ignored if log_config_append is set. (boolean value)
#use_stderr = true
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages when context is undefined. (string value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Additional data to append to log message when logging level for the message is DEBUG. (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format. (string value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
# Defines the format string for %(user_identity)s that is used in logging_context_format_string. (string value)
#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
# List of package logging levels in logger=LEVEL pairs. This option is ignored if log_config_append is set. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
# The format for an instance that is passed with the log message. (string value)
#instance_format = "[instance: %(uuid)s] "
# The format for an instance UUID that is passed with the log message. (string value)
#instance_uuid_format = "[instance: %(uuid)s] "
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
[ml2]
#
# From neutron.ml2
#
# List of network type driver entrypoints to be loaded from the neutron.ml2.type_drivers namespace. (list value)
#type_drivers = local,flat,vlan,gre,vxlan,geneve
type_drivers = local,flat,vlan,gre,vxlan
# Ordered list of network_types to allocate as tenant networks. The default value 'local' is useful for single-box testing but provides no
# connectivity between hosts. (list value)
#tenant_network_types = local
tenant_network_types = {{ server.backend.tenant_network_types }}
# An ordered list of networking mechanism driver entrypoints to be loaded from the neutron.ml2.mechanism_drivers namespace. (list value)
#mechanism_drivers =openvswitch,l2population
mechanism_drivers ={%- for backend_name, mechanism in server.backend.get('mechanism', {}).iteritems() %}{{ mechanism.driver }},{%- endfor %}l2population
# An ordered list of extension driver entrypoints to be loaded from the neutron.ml2.extension_drivers namespace. For example:
# extension_drivers = port_security,qos (list value)
extension_drivers = port_security
#extension_drivers =
# Maximum size of an IP packet (MTU) that can traverse the underlying physical network infrastructure without fragmentation for
# overlay/tunnel networks. In most cases, use the same value as the global_physnet_mtu option. (integer value)
#path_mtu = 1500
path_mtu = {{ server.get('global_physnet_mtu', '1500') }}
# A list of mappings of physical networks to MTU values. The format of the mapping is <physnet>:<mtu val>. This mapping allows specifying a
# physical network MTU value that differs from the default global_physnet_mtu value. (list value)
#physical_network_mtus =
physical_network_mtus =physnet1:{{ server.backend.get('external_mtu', '1500') }}{%- if "vlan" in server.backend.tenant_network_types %},physnet2:{{ server.backend.get('external_mtu', '1500') }}{%- endif %}
# Default network type for external networks when no provider attributes are specified. By default it is None, which means that if provider
# attributes are not specified while creating external networks then they will have the same type as tenant networks. Allowed values for
# external_network_type config option depend on the network type values configured in type_drivers config option. (string value)
#external_network_type = <None>
[ml2_type_flat]
#
# From neutron.ml2
#
# List of physical_network names with which flat networks can be created. Use default '*' to allow flat networks with arbitrary
# physical_network names. Use an empty list to disable flat networks. (list value)
#flat_networks = *
flat_networks = *
[ml2_type_geneve]
#
# From neutron.ml2
#
# Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of Geneve VNI IDs that are available for tenant network allocation
# (list value)
#vni_ranges =
# Geneve encapsulation header size is dynamic, this value is used to calculate the maximum MTU for the driver. This is the sum of the sizes
# of the outer ETH + IP + UDP + GENEVE header sizes. The default size for this field is 50, which is the size of the Geneve header without
# any additional option headers. (integer value)
#max_header_size = 50
[ml2_type_gre]
#
# From neutron.ml2
#
# Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
# (list value)
#tunnel_id_ranges =
tunnel_id_ranges =2:65535
[ml2_type_vlan]
#
# From neutron.ml2
#
# List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> specifying physical_network names usable for VLAN provider and
# tenant networks, as well as ranges of VLAN tags on each available for allocation to tenant networks. (list value)
#network_vlan_ranges =
network_vlan_ranges = physnet1{%- if server.backend.external_vlan_range is defined %}:{{ server.backend.external_vlan_range }}{%- endif %}{%- if "vlan" in server.backend.tenant_network_types %},physnet2:{{ server.backend.tenant_vlan_range }}{%- endif %}
[ml2_type_vxlan]
#
# From neutron.ml2
#
# Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of VXLAN VNI IDs that are available for tenant network allocation
# (list value)
#vni_ranges =
vni_ranges =2:65535
# Multicast group for VXLAN. When configured, will enable sending all broadcast traffic to this multicast group. When left unconfigured,
# will disable multicast VXLAN mode. (string value)
#vxlan_group = <None>
vxlan_group = 224.0.0.1
[securitygroup]
#
# From neutron.ml2
#
# Driver for security groups firewall in the L2 agent (string value)
#firewall_driver = <None>
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
# Controls whether the neutron security group API is enabled in the server. It should be false when using no security groups or using the
# nova security group API. (boolean value)
#enable_security_group = true
enable_security_group = True
# Use ipset to speed-up the iptables based security groups. Enabling ipset support requires that ipset is installed on L2 agent node.
# (boolean value)
#enable_ipset = true

File diff suppressed because it is too large Load Diff

View File

@ -1,16 +0,0 @@
# Generated by Salt.
{%- from "neutron/map.jinja" import server with context %}
# defaults for neutron-server
# path to config file corresponding to the core_plugin specified in
# neutron.conf
#NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini"
{%- if server.backend.engine == "ml2" %}
NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/ml2/ml2_conf.ini"
{%- endif %}
{%- if server.backend.engine == "contrail" %}
NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/opencontrail/ContrailPlugin.ini"
{%- endif %}

File diff suppressed because it is too large Load Diff

View File

@ -1,252 +0,0 @@
{%- if pillar.neutron.gateway is defined %}
{%- from "neutron/map.jinja" import gateway as neutron with context %}
{%- else %}
{%- from "neutron/map.jinja" import compute as neutron with context %}
{%- endif %}
[DEFAULT]
#
# From oslo.log
#
# If set to true, the logging level will be set to DEBUG instead of the default INFO level. (boolean value)
#debug = false
# If set to false, the logging level will be set to WARNING instead of the default INFO level. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#verbose = true
# The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging
# configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging
# configuration is set in the configuration file and other logging configuration options are ignored (for example,
# logging_context_format_string). (string value)
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>
# Defines the format string for %%(asctime)s in log records. Default: %(default)s . This option is ignored if log_config_append is set.
# (string value)
#log_date_format = %Y-%m-%d %H:%M:%S
# (Optional) Name of log file to send logging output to. If no default is set, logging will go to stderr as defined by use_stderr. This
# option is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>
# Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified
# path instantaneously. It makes sense only if log_file option is specified and Linux platform is used. This option is ignored if
# log_config_append is set. (boolean value)
#watch_log_file = false
# Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if
# log_config_append is set. (boolean value)
#use_syslog = false
# Syslog facility to receive log lines. This option is ignored if log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
# Log output to standard error. This option is ignored if log_config_append is set. (boolean value)
#use_stderr = true
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages when context is undefined. (string value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Additional data to append to log message when logging level for the message is DEBUG. (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format. (string value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
# Defines the format string for %(user_identity)s that is used in logging_context_format_string. (string value)
#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
# List of package logging levels in logger=LEVEL pairs. This option is ignored if log_config_append is set. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
# The format for an instance that is passed with the log message. (string value)
#instance_format = "[instance: %(uuid)s] "
# The format for an instance UUID that is passed with the log message. (string value)
#instance_uuid_format = "[instance: %(uuid)s] "
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
[agent]
#
# From neutron.ml2.ovs.agent
#
# The number of seconds the agent will wait between polling for local device changes. (integer value)
#polling_interval = 2
# Minimize polling by monitoring ovsdb for interface changes. (boolean value)
#minimize_polling = true
# The number of seconds to wait before respawning the ovsdb monitor after losing communication with it. (integer value)
#ovsdb_monitor_respawn_interval = 30
# Network types supported by the agent (gre and/or vxlan). (list value)
#tunnel_types =
{%- if "vxlan" in neutron.backend.tenant_network_types %}
tunnel_types =vxlan
# The UDP port to use for VXLAN tunnels. (port value)
# Minimum value: 0
# Maximum value: 65535
#vxlan_udp_port = 4789
vxlan_udp_port = 4789
# MTU size of veth interfaces (integer value)
#veth_mtu = 9000
# Use ML2 l2population mechanism driver to learn remote MAC and IPs and improve tunnel scalability. (boolean value)
#l2_population = false
l2_population = True
# Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 l2population driver. Allows the switch (when supporting an
# overlay) to respond to an ARP request locally without performing a costly ARP broadcast into the overlay. (boolean value)
#arp_responder = false
arp_responder = True
{%- endif %}
# Enable suppression of ARP responses that don't match an IP address that belongs to the port from which they originate. Note: This prevents
# the VMs attached to this agent from spoofing, it doesn't protect them from other devices which have the capability to spoof (e.g. bare
# metal or VMs attached to agents without this flag set to True). Spoofing rules will not be added to any ports that have port security
# disabled. For LinuxBridge, this requires ebtables. For OVS, it requires a version that supports matching ARP headers. This option will be
# removed in Newton so the only way to disable protection will be via the port security extension. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#prevent_arp_spoofing = true
# Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/VXLAN tunnel. (boolean value)
#dont_fragment = true
# Make the l2 agent run in DVR mode. (boolean value)
#enable_distributed_routing = false
enable_distributed_routing = {{ neutron.get('dvr', 'False') }}
# Set new timeout in seconds for new rpc calls after agent receives SIGTERM. If value is set to 0, rpc timeout won't be changed (integer
# value)
#quitting_rpc_timeout = 10
# Reset flow table on start. Setting this to True will cause brief traffic interruption. (boolean value)
#drop_flows_on_start = false
drop_flows_on_start = False
# Set or un-set the tunnel header checksum on outgoing IP packet carrying GRE/VXLAN tunnel. (boolean value)
#tunnel_csum = false
# Selects the Agent Type reported (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#agent_type = Open vSwitch agent
[ovs]
#
# From neutron.ml2.ovs.agent
#
# Integration bridge to use. Do not change this parameter unless you have a good reason to. This is the name of the OVS integration bridge.
# There is one per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM VIFs are attached to this bridge and then
# 'patched' according to their network connectivity. (string value)
#integration_bridge = br-int
integration_bridge = br-int
# Tunnel bridge to use. (string value)
#tunnel_bridge = br-tun
tunnel_bridge = br-tun
# Peer patch port in integration bridge for tunnel bridge. (string value)
#int_peer_patch_port = patch-tun
# Peer patch port in tunnel bridge for integration bridge. (string value)
#tun_peer_patch_port = patch-int
# Local IP address of tunnel endpoint. Can be either an IPv4 or IPv6 address. (IP address value)
#local_ip = <None>
{%- if "vxlan" in neutron.backend.tenant_network_types %}
local_ip = {{ neutron.local_ip }}
{%- endif %}
# Comma-separated list of <physical_network>:<bridge> tuples mapping physical network names to the agent's node-specific Open vSwitch bridge
# names to be used for flat and VLAN networks. The length of bridge names should be no more than 11. Each bridge must exist, and should have
# a physical network interface configured as a port. All physical networks configured on the server should have mappings to appropriate
# bridges on each agent. Note: If you remove a bridge from this mapping, make sure to disconnect it from the integration bridge as it won't
# be managed by the agent anymore. Deprecated for ofagent. (list value)
#bridge_mappings =
{%- if "vlan" in neutron.backend.tenant_network_types %}
bridge_mappings ={%- if neutron.get('external_access', True) %}physnet1:br-floating,{%- endif %}physnet2:br-prv
{%- elif neutron.get('external_access', True) %}
bridge_mappings =physnet1:br-floating
{%- endif %}
# Use veths instead of patch ports to interconnect the integration bridge to physical networks. Support kernel without Open vSwitch patch
# port support so long as it is set to True. (boolean value)
#use_veth_interconnection = false
# OpenFlow interface to use. (string value)
# Allowed values: ovs-ofctl, native
#of_interface = ovs-ofctl
# OVS datapath to use. 'system' is the default value and corresponds to the kernel datapath. To enable the userspace datapath set this value
# to 'netdev'. (string value)
# Allowed values: system, netdev
#datapath_type = system
# OVS vhost-user socket directory. (string value)
#vhostuser_socket_dir = /var/run/openvswitch
# Address to listen on for OpenFlow connections. Used only for 'native' driver. (IP address value)
#of_listen_address = 127.0.0.1
# Port to listen on for OpenFlow connections. Used only for 'native' driver. (port value)
# Minimum value: 0
# Maximum value: 65535
#of_listen_port = 6633
# Timeout in seconds to wait for the local switch connecting the controller. Used only for 'native' driver. (integer value)
#of_connect_timeout = 30
# Timeout in seconds to wait for a single OpenFlow request. Used only for 'native' driver. (integer value)
#of_request_timeout = 10
# The interface for interacting with the OVSDB (string value)
# Allowed values: vsctl, native
#ovsdb_interface = vsctl
# The connection string for the native OVSDB backend. Requires the native ovsdb_interface to be enabled. (string value)
#ovsdb_connection = tcp:127.0.0.1:6640
[securitygroup]
#
# From neutron.ml2.ovs.agent
#
# Driver for security groups firewall in the L2 agent (string value)
#firewall_driver = <None>
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
# Controls whether the neutron security group API is enabled in the server. It should be false when using no security groups or using the
# nova security group API. (boolean value)
#enable_security_group = true
enable_security_group = True
# Use ipset to speed-up the iptables based security groups. Enabling ipset support requires that ipset is installed on L2 agent node.
# (boolean value)
#enable_ipset = true

View File

@ -1,58 +0,0 @@
{#- Neutron gateway node: installs the L3 / DHCP / metadata / OVS agents and
    manages their configuration files. The `gateway` dict (pkgs, services,
    version, enabled) comes from neutron/map.jinja. -#}
{% from "neutron/map.jinja" import gateway with context %}
{%- if gateway.enabled %}

neutron_gateway_packages:
  pkg.installed:
  - names: {{ gateway.pkgs }}

{%- if pillar.neutron.server is not defined %}
# Only render the generic neutron.conf when this node is not also a server;
# the server state manages its own neutron.conf.
/etc/neutron/neutron.conf:
  file.managed:
  - source: salt://neutron/files/{{ gateway.version }}/neutron-generic.conf.{{ grains.os_family }}
  - template: jinja
  - require:
    - pkg: neutron_gateway_packages
{%- endif %}

/etc/neutron/l3_agent.ini:
  file.managed:
  - source: salt://neutron/files/{{ gateway.version }}/l3_agent.ini
  - template: jinja
  - require:
    - pkg: neutron_gateway_packages

# NOTE(review): dhcp_agent.ini is the only agent config deployed without
# `template: jinja` — confirm this is intentional.
/etc/neutron/dhcp_agent.ini:
  file.managed:
  - source: salt://neutron/files/{{ gateway.version }}/dhcp_agent.ini
  - require:
    - pkg: neutron_gateway_packages

/etc/neutron/metadata_agent.ini:
  file.managed:
  - source: salt://neutron/files/{{ gateway.version }}/metadata_agent.ini
  - template: jinja
  - require:
    - pkg: neutron_gateway_packages

/etc/neutron/plugins/ml2/openvswitch_agent.ini:
  file.managed:
  - source: salt://neutron/files/{{ gateway.version }}/openvswitch_agent.ini
  - template: jinja
  - require:
    - pkg: neutron_gateway_packages

# Restart all gateway agents whenever any managed config changes.
neutron_gateway_services:
  service.running:
  - names: {{ gateway.services }}
  - enable: true
  - watch:
    - file: /etc/neutron/neutron.conf
    - file: /etc/neutron/l3_agent.ini
    - file: /etc/neutron/metadata_agent.ini
    - file: /etc/neutron/plugins/ml2/openvswitch_agent.ini
    - file: /etc/neutron/dhcp_agent.ini
{%- endif %}

View File

@ -1,11 +0,0 @@
{#- Formula entry point: include only the role states whose pillar key is
    present on this minion. #}
include:
{% if pillar.neutron.server is defined %}
- neutron.server
{% endif %}
{% if pillar.neutron.gateway is defined %}
- neutron.gateway
{% endif %}
{% if pillar.neutron.compute is defined %}
- neutron.compute
{% endif %}

View File

@ -1,68 +0,0 @@
{#- Role maps: per-OS-family defaults merged with pillar overrides via
    grains.filter_by. Exposes the `compute`, `gateway` and `server` dicts
    consumed by the state files. -#}
{% set compute = salt['grains.filter_by']({
    'Debian': {
        'pkgs': ['neutron-openvswitch-agent', 'openvswitch-switch', 'openvswitch-datapath-dkms', 'python-pycadf'],
        'services': ['neutron-openvswitch-agent'],
        'audit': {
            'enabled': false
        }
    },
    'RedHat': {
        'pkgs': ['openstack-neutron-openvswitch', 'openvswitch', 'python-pycadf'],
        'services': ['neutron-openvswitch-agent'],
        'audit': {
            'enabled': false
        }
    },
}, merge=pillar.neutron.get('compute', {})) %}

{% set gateway = salt['grains.filter_by']({
    'Debian': {
        'pkgs': ['neutron-dhcp-agent', 'neutron-openvswitch-agent', 'neutron-l3-agent', 'openvswitch-common', 'neutron-metadata-agent'],
        'services': ['neutron-openvswitch-agent', 'neutron-metadata-agent', 'neutron-l3-agent', 'neutron-dhcp-agent']
    },
    'RedHat': {
        'pkgs': ['openstack-neutron-openvswitch'],
        'services': ['neutron-openvswitch-agent', 'neutron-metadata-agent', 'neutron-l3-agent', 'neutron-dhcp-agent']
    },
}, merge=pillar.neutron.get('gateway', {})) %}

{% set server = salt['grains.filter_by']({
    'Debian': {
        'pkgs': ['neutron-server','neutron-lbaas-agent', 'gettext-base', 'python-pycadf'],
        'pkgs_ml2': ['neutron-plugin-ml2'],
        'services': ['neutron-server'],
        'notification': False,
        'audit': {
            'enabled': false
        }
    },
    'RedHat': {
        'pkgs_ml2': ['openstack-neutron-ml2', 'python-pycadf'],
        'pkgs': ['openstack-neutron'],
        'services': ['neutron-server'],
        'notification': False,
        'audit': {
            'enabled': false
        }
    },
}, merge=pillar.neutron.get('server', {})) %}

{#- When the plugin is contrail and no backend is given in pillar, derive one
    from the opencontrail formula's discovery/identity configuration. -#}
{%- if pillar.neutron.server is defined %}
{%- set tmp_server = pillar.neutron.server %}
{%- if not tmp_server.backend is defined %}
{%- if tmp_server.plugin == "contrail" %}
{%- from "opencontrail/map.jinja" import config with context %}
{%- set backend = {'engine': tmp_server.plugin, 'host': config.discovery.host, 'token': config.identity.token, 'user': config.identity.user, 'password': config.identity.password, 'tenant': config.identity.tenant} %}
{%- set tmp = server.update({'backend': backend}) %}
{%- endif %}
{%- endif %}
{%- endif %}

View File

@ -1,19 +0,0 @@
{#- collectd metadata: neutron API health checks, emitted only on server
    nodes with the role enabled. -#}
{%- if pillar.neutron.server is defined %}
{%- from "neutron/map.jinja" import server with context %}
{%- if server.get('enabled', False) %}
# Local probe of the API bound on this node (0.0.0.0 is rewritten to loopback).
local_plugin:
  collectd_check_local_endpoint:
    endpoint:
      neutron-api:
        expected_code: 200
        url: "http://{{ server.bind.address|replace('0.0.0.0', '127.0.0.1') }}:{{ server.bind.port }}/"
# Remote probe via the keystone-authenticated python plugin; keystone API
# version picks the /v2.0 or /v3 auth URL.
remote_plugin:
  openstack_neutron:
    plugin: python
    template: neutron/files/collectd_openstack_neutron.conf
    url: "http://{{ server.identity.host }}:{{ server.identity.port }}/v{% if server.identity.get('api_version', 2)|int == 2 %}2.0{% else %}3{% endif %}"
    username: {{ server.identity.user }}
    password: {{ server.identity.password }}
    tenant: {{ server.identity.tenant }}
{%- endif %}
{%- endif %}

View File

@ -1,8 +0,0 @@
# Config-file metadata consumed by external tooling: lists the rendered
# template backing neutron.conf on server nodes.
config:
{%- if pillar.neutron.server is defined %}
{%- from "neutron/map.jinja" import server with context %}
  neutron.conf:
    source: "salt://neutron/files/{{ server.version }}/neutron-server.conf.{{ grains.os_family|default('Debian') }}"
    template: jinja
{%- endif %}

View File

@ -1,11 +0,0 @@
{#- Grafana dashboard selection: the openvswitch variant for the ml2
    backend, the generic neutron dashboard for any other server backend. -#}
{%- if pillar.neutron.get('server', {}).get('backend', {}).engine is defined and pillar.neutron.server.backend.engine == "ml2" %}
dashboard:
  neutron:
    format: json
    template: neutron/files/grafana_dashboards/neutron_openvswitch_influxdb.json
{%- elif pillar.neutron.server is defined %}
dashboard:
  neutron:
    format: json
    template: neutron/files/grafana_dashboards/neutron_influxdb.json
{%- endif %}

View File

@ -1,277 +0,0 @@
{%- from "neutron/map.jinja" import server with context %}
{%- if server.get('backend', {}).engine is defined and server.backend.engine == "ml2" %}
{% set neutron_agents = ('l3', 'dhcp', 'metadata', 'openvswitch') %}
{%- else %}
{% set neutron_agents = () %}
{%- endif %}
{% set ovs_support = pillar.neutron.get('compute', {}).get('enabled', False) or pillar.neutron.get('gateway', {}).get('enabled', False) %}
log_collector:
decoder:
neutron:
engine: sandbox
module_file: /usr/share/lma_collector/decoders/openstack_log.lua
module_dir: /usr/share/lma_collector/common;/usr/share/heka/lua_modules
adjust_timezone: true
{%- if ovs_support %}
ovs:
engine: sandbox
module_file: /usr/share/lma_collector/decoders/ovs_log.lua
module_dir: /usr/share/lma_collector/common;/usr/share/heka/lua_modules
{%- endif %}
splitter:
neutron:
engine: token
delimiter: '\n'
input:
neutron_log:
engine: logstreamer
log_directory: "/var/log"
file_match: 'neutron/(?P<Service>(dhcp-agent|l3-agent|metadata-agent|neutron-netns-cleanup|openvswitch-agent|neutron-lbaas-agent|server))\.log\.?(?P<Seq>\d*)$'
differentiator: ['neutron', '_', 'Service']
priority: ["^Seq"]
decoder: "neutron_decoder"
splitter: "neutron_splitter"
{%- if ovs_support %}
ovs_log:
engine: logstreamer
log_directory: "/var/log/openvswitch"
file_match: '(?P<Service>ovs\-vswitchd|ovsdb\-server|ovs\-ctl)\.log$'
differentiator: ['Service']
priority: ["^Seq"]
decoder: "ovs_decoder"
splitter: "TokenSplitter"
{%- endif %}
metric_collector:
trigger:
neutron_logs_error:
description: 'Too many errors have been detected in Neutron logs'
severity: warning
no_data_policy: okay
rules:
- metric: log_messages
field:
service: neutron
level: error
relational_operator: '>'
threshold: 0.1
window: 70
periods: 0
function: max
{%- if pillar.neutron.server is defined %}
neutron_api_local_endpoint:
description: 'Neutron API is locally down'
severity: down
rules:
- metric: openstack_check_local_api
field:
service: neutron-api
relational_operator: '=='
threshold: 0
window: 60
periods: 0
function: last
{%- endif %}
alarm:
{%- if pillar.neutron.server is defined %}
neutron_logs_control:
alerting: enabled
triggers:
- neutron_logs_error
dimension:
service: neutron-logs-control
neutron_api_endpoint:
alerting: enabled
triggers:
- neutron_api_local_endpoint
dimension:
service: neutron-api-endpoint
{%- else %}
neutron_logs_data:
alerting: enabled
triggers:
- neutron_logs_error
dimension:
service: neutron-logs-data
{%- endif %}
{%- if pillar.neutron.server is defined %}
remote_collector:
trigger:
neutron_api_check_failed:
description: 'Endpoint check for neutron-api is failed'
severity: down
rules:
- metric: openstack_check_api
field:
service: neutron-api
relational_operator: '=='
threshold: 0
window: 60
periods: 0
function: last
{%- for agent in neutron_agents %}
neutron_{{ agent }}_two_up:
description: 'Some Neutron {{ agent }} agents are down'
severity: warning
logical_operator: and
rules:
- metric: openstack_neutron_agents
field:
service: {{ agent }}
state: up
relational_operator: '>='
threshold: 2
window: 60
periods: 0
function: last
- metric: openstack_neutron_agents
field:
service: {{ agent }}
state: down
relational_operator: '>'
threshold: 0
window: 60
periods: 0
function: last
neutron_{{ agent }}_one_up:
description: 'Only one Neutron {{ agent }} agent is up'
severity: critical
logical_operator: and
rules:
- metric: openstack_neutron_agents
field:
service: {{ agent }}
state: up
relational_operator: '=='
threshold: 1
window: 60
periods: 0
function: last
- metric: openstack_neutron_agents_percent
field:
service: {{ agent }}
state: up
relational_operator: '<'
threshold: 100
window: 60
periods: 0
function: last
neutron_{{ agent }}_zero_up:
description: 'All Neutron {{ agent }} agents are down or disabled'
severity: down
rules:
- metric: openstack_neutron_agents
field:
service: {{ agent }}
state: up
relational_operator: '=='
threshold: 0
window: 60
periods: 0
function: last
{%- endfor %}
alarm:
neutron_api_check:
triggers:
- neutron_api_check_failed
dimension:
service: neutron-api-check
{%- for agent in neutron_agents %}
neutron_{{ agent }}:
alerting: enabled
triggers:
- neutron_{{ agent }}_zero_up
- neutron_{{ agent }}_one_up
- neutron_{{ agent }}_two_up
dimension:
service: neutron-{{ agent }}
{%- endfor %}
{%- endif %}
aggregator:
alarm_cluster:
{%- if pillar.neutron.server is defined %}
neutron_logs_control:
policy: status_of_members
alerting: enabled
group_by: hostname
match:
service: neutron-logs-control
members:
- neutron_logs_control
dimension:
service: neutron-control
nagios_host: 01-service-clusters
neutron_api_endpoint:
policy: availability_of_members
alerting: enabled
group_by: hostname
match:
service: neutron-api-endpoint
members:
- neutron_api_endpoint
dimension:
service: neutron-control
nagios_host: 01-service-clusters
neutron_api_check:
policy: highest_severity
alerting: enabled
match:
service: neutron-api-check
members:
- neutron_api_check
dimension:
service: neutron-control
nagios_host: 01-service-clusters
neutron_control:
policy: highest_severity
alerting: enabled_with_notification
match:
service: neutron-control
members:
- neutron_logs_control
- neutron_api_endpoint
- neutron_api_check
dimension:
cluster_name: neutron-control
nagios_host: 00-top-clusters
{%- for agent in neutron_agents %}
neutron_{{ agent }}:
policy: highest_severity
alerting: enabled
match:
service: neutron-{{ agent }}
members:
- neutron_{{ agent }}
dimension:
service: neutron-data
nagios_host: 01-service-clusters
{%- endfor %}
{%- if neutron_agents|length > 0 %}
neutron_data:
policy: highest_severity
alerting: enabled_with_notification
match:
service: neutron-data
members:
- neutron_logs_data
{%- for agent in neutron_agents %}
- neutron_{{ agent }}
{%- endfor %}
dimension:
cluster_name: neutron-data
nagios_host: 00-top-clusters
{%- endif %}
{%- else %}
neutron_logs_data:
policy: status_of_members
alerting: enabled
group_by: hostname
match:
service: neutron-logs-data
members:
- neutron_logs_data
dimension:
service: neutron-data
nagios_host: 01-service-clusters
{%- endif %}

View File

@ -1,11 +0,0 @@
# Orchestration ordering: neutron-server runs after keystone.server (one
# minion at a time); compute nodes run after the server role.
orchestrate:
  server:
    priority: 580
    batch: 1
    require:
    - salt: keystone.server
  compute:
    priority: 590
    require:
    - salt: neutron.server

View File

@ -1,7 +0,0 @@
# Sensu check: neutron-server process owned by the neutron user must have
# between 1 and 30 instances running.
check:
  local_neutron_server_proc:
    command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C neutron-server -u neutron -c 1:30"
    interval: 60
    occurrences: 1
    subscribers:
    - local-neutron-server

View File

@ -1,41 +0,0 @@
{#- Sphinx documentation metadata: describes the server role's endpoint and
    key connection parameters for generated service docs. -#}
doc:
  name: Neutron
  description: Neutron is an OpenStack project to provide networking as a service between interface devices managed by other Openstack services.
  role:
{%- if pillar.neutron.server is defined %}
{%- from "neutron/map.jinja" import server with context %}
    server:
      name: server
      endpoint:
        neutron_server:
          name: neutron-server
          type: neutron-server
          address: http://{{ server.bind.address }}:{{ server.bind.port }}
          protocol: http
      param:
        bind:
          value: {{ server.bind.address }}:{{ server.bind.port }}
        plugin:
          value: {{ server.plugin }}
        version:
          name: "Version"
          value: {{ server.version }}
        database_host:
          name: "Database"
          value: {{ server.database.user }}@{{ server.database.host }}:{{ server.database.port }}//{{ server.database.name }}
        message_queue_ip:
          name: "Message queue"
          value: {{ server.message_queue.user }}@{{ server.message_queue.host }}:{{ server.message_queue.port }}{{ server.message_queue.virtual_host }}
        compute_host:
          name: "Compute service"
          value: {{ server.compute.user }}@{{ server.compute.host }}
        identity_host:
          name: "Identity service"
          value: {{ server.identity.user }}@{{ server.identity.host }}:{{ server.identity.port }}
        packages:
          value: |
            {#- Installed package versions, queried live via dpkg. #}
            {%- for pkg in server.pkgs %}
            {%- set pkg_version = "dpkg -l "+pkg+" | grep "+pkg+" | awk '{print $3}'" %}
            * {{ pkg }}: {{ salt['cmd.run'](pkg_version) }}
            {%- endfor %}
{%- endif %}

View File

@ -1,167 +0,0 @@
{#- Neutron control-plane node: server package/config/service plus per-backend
    plugin setup (contrail, ml2, midonet). `server` comes from map.jinja. -#}
{%- from "neutron/map.jinja" import server with context %}
{%- if server.enabled %}

neutron_server_packages:
  pkg.installed:
  - names: {{ server.pkgs }}

{% if server.backend.engine == "contrail" %}

/etc/neutron/plugins/opencontrail/ContrailPlugin.ini:
  file.managed:
  - source: salt://neutron/files/{{ server.version }}/ContrailPlugin.ini
  - template: jinja
  - require:
    - pkg: neutron_server_packages
    - pkg: neutron_contrail_package

# Point /etc/neutron/plugin.ini at the contrail plugin config (first run only).
contrail_plugin_link:
  cmd.run:
  - names:
    - ln -s /etc/neutron/plugins/opencontrail/ContrailPlugin.ini /etc/neutron/plugin.ini
  - unless: test -e /etc/neutron/plugin.ini
  - require:
    - file: /etc/neutron/plugins/opencontrail/ContrailPlugin.ini

neutron_contrail_package:
  pkg.installed:
  - name: neutron-plugin-contrail

neutron_server_service:
  service.running:
  - name: neutron-server
  - enable: true
  - watch:
    - file: /etc/neutron/neutron.conf
{%- endif %}

{% if server.backend.engine == "ml2" %}

/etc/neutron/plugins/ml2/ml2_conf.ini:
  file.managed:
  - source: salt://neutron/files/{{ server.version }}/ml2_conf.ini
  - template: jinja
  - require:
    - pkg: neutron_server_packages

# Point /etc/neutron/plugin.ini at the ml2 plugin config (first run only).
ml2_plugin_link:
  cmd.run:
  - names:
    - ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
  - unless: test -e /etc/neutron/plugin.ini
  - require:
    - file: /etc/neutron/plugins/ml2/ml2_conf.ini

neutron_db_manage:
  cmd.run:
  - name: neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head
  - require:
    - file: /etc/neutron/neutron.conf
    - file: /etc/neutron/plugins/ml2/ml2_conf.ini
{%- endif %}

/etc/neutron/neutron.conf:
  file.managed:
  - source: salt://neutron/files/{{ server.version }}/neutron-server.conf.{{ grains.os_family }}
  - template: jinja
  - require:
    - pkg: neutron_server_packages

/etc/neutron/api-paste.ini:
  file.managed:
  - source: salt://neutron/files/{{ server.version }}/api-paste.ini.{{ grains.os_family }}
  - template: jinja
  - require:
    - pkg: neutron_server_packages

{%- if grains.os_family == "Debian" %}

/etc/default/neutron-server:
  file.managed:
  - source: salt://neutron/files/{{ server.version }}/neutron-server
  - template: jinja
  - require:
    - pkg: neutron_server_packages
{%- if not grains.get('noservices', False) %}
  - watch_in:
    - service: neutron_server_services
{%- endif %}
{%- endif %}

{%- if server.backend.engine == "midonet" %}

/etc/neutron/plugins/midonet/midonet.ini:
  file.managed:
  - source: salt://neutron/files/{{ server.version }}/midonet.ini
  - user: root
  - group: root
  - mode: 644
  - makedirs: true
  - dir_mode: 755
  - template: jinja

neutron_db_manage:
  cmd.run:
  - name: neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/midonet/midonet.ini upgrade head
  - require:
    - file: /etc/neutron/neutron.conf
    - file: /etc/neutron/plugins/midonet/midonet.ini

{%- if server.version == "kilo" %}

midonet_neutron_packages:
  pkg.installed:
  - names:
    - python-neutron-plugin-midonet
    - python-neutron-lbaas

midonet-db-manage:
  cmd.run:
  - name: midonet-db-manage upgrade head
{%- else %}

midonet_neutron_packages:
  pkg.installed:
  - names:
    - python-networking-midonet
    - python-neutron-lbaas
    - python-neutron-fwaas

# NOTE(review): this redefines the `neutron_db_manage` state ID already
# declared above in the same midonet branch — when backend == midonet and
# version != kilo, salt will report conflicting state IDs. The second
# declaration should be renamed (e.g. neutron_db_manage_midonet); confirm.
neutron_db_manage:
  cmd.run:
  - name: neutron-db-manage --subproject networking-midonet upgrade head
  - require:
    - file: /etc/neutron/neutron.conf
    - file: /etc/neutron/plugins/midonet/midonet.ini
{%- endif %}
{%- endif %}

{%- if not grains.get('noservices', False) %}
neutron_server_services:
  service.running:
  - names: {{ server.services }}
  - enable: true
  - watch:
    - file: /etc/neutron/neutron.conf
{%- endif %}

{%- if grains.get('virtual_subtype', None) == "Docker" %}

# Container deployments get an entrypoint script instead of init services.
neutron_entrypoint:
  file.managed:
  - name: /entrypoint.sh
  - template: jinja
  - source: salt://neutron/files/entrypoint.sh
  - mode: 755
{%- endif %}

{%- endif %}

View File

@ -1,24 +0,0 @@
neutron:
compute:
agent_mode: dvr
backend:
engine: ml2
tenant_network_types: "flat,vxlan"
mechanism:
ovs:
driver: openvswitch
dvr: true
enabled: true
external_access: true
local_ip: 10.1.0.105
message_queue:
engine: rabbitmq
host: 172.16.10.254
password: workshop
port: 5672
user: openstack
virtual_host: /openstack
metadata:
host: 172.16.10.254
password: password
version: mitaka

View File

@ -1,24 +0,0 @@
neutron:
compute:
agent_mode: legacy
backend:
engine: ml2
tenant_network_types: "flat,vxlan"
mechanism:
ovs:
driver: openvswitch
dvr: false
enabled: true
external_access: false
local_ip: 10.1.0.105
message_queue:
engine: rabbitmq
host: 172.16.10.254
password: workshop
port: 5672
user: openstack
virtual_host: /openstack
metadata:
host: 172.16.10.254
password: password
version: mitaka

View File

@ -1,24 +0,0 @@
neutron:
compute:
agent_mode: dvr
backend:
engine: ml2
tenant_network_types: "flat,vxlan"
mechanism:
ovs:
driver: openvswitch
dvr: true
enabled: true
external_access: false
local_ip: 10.1.0.105
message_queue:
engine: rabbitmq
host: 172.16.10.254
password: workshop
port: 5672
user: openstack
virtual_host: /openstack
metadata:
host: 172.16.10.254
password: password
version: mitaka

View File

@ -1,51 +0,0 @@
neutron:
server:
enabled: true
fwaas: false
dns_domain: novalocal
tunnel_type: vxlan
version: liberty
backend:
engine: contrail
host: 127.0.0.1
user: admin
password: password
token: token
tenant: admin
bind:
address: 127.0.0.1
port: 9696
database:
engine: mysql
host: 127.0.0.1
port: 3306
name: neutron
user: neutron
password: password
identity:
engine: keystone
region: RegionOne
host: 127.0.0.1
port: 35357
user: neutron
password: password
tenant: service
message_queue:
engine: rabbitmq
members:
- host: 127.0.0.1
- host: 127.0.1.1
- host: 127.0.2.1
user: openstack
password: password
virtual_host: '/openstack'
compute:
host: 127.0.0.1
region: RegionOne
user: nova
password: password
tenant: service
audit:
filter_factory: 'keystonemiddleware.audit:filter_factory'
map_file: '/etc/pycadf/neutron_api_audit_map.conf'

View File

@ -1,47 +0,0 @@
neutron:
server:
backend:
engine: ml2
external_mtu: 1500
mechanism:
ovs:
driver: openvswitch
tenant_network_types: flat,vxlan
bind:
address: 172.16.10.101
port: 9696
compute:
host: 172.16.10.254
password: workshop
region: RegionOne
tenant: service
user: nova
database:
engine: mysql
host: 172.16.10.254
name: neutron
password: workshop
port: 3306
user: neutron
dns_domain: novalocal
dvr: true
enabled: true
global_physnet_mtu: 1500
identity:
engine: keystone
host: 172.16.10.254
password: workshop
port: 35357
region: RegionOne
tenant: service
user: neutron
l3_ha: false
message_queue:
engine: rabbitmq
host: 172.16.10.254
password: workshop
port: 5672
user: openstack
virtual_host: /openstack
plugin: ml2
version: mitaka

View File

@ -1,47 +0,0 @@
neutron:
server:
backend:
engine: ml2
external_mtu: 1500
mechanism:
ovs:
driver: openvswitch
tenant_network_types: flat,vxlan
bind:
address: 172.16.10.101
port: 9696
compute:
host: 172.16.10.254
password: workshop
region: RegionOne
tenant: service
user: nova
database:
engine: mysql
host: 172.16.10.254
name: neutron
password: workshop
port: 3306
user: neutron
dns_domain: novalocal
dvr: false
enabled: true
global_physnet_mtu: 1500
identity:
engine: keystone
host: 172.16.10.254
password: workshop
port: 35357
region: RegionOne
tenant: service
user: neutron
l3_ha: True
message_queue:
engine: rabbitmq
host: 172.16.10.254
password: workshop
port: 5672
user: openstack
virtual_host: /openstack
plugin: ml2
version: mitaka

View File

@ -1,45 +0,0 @@
neutron:
server:
enabled: true
backend:
engine: contrail
host: 127.0.0.1
user: admin
password: password
token: token
tenant: admin
fwaas: false
dns_domain: novalocal
tunnel_type: vxlan
version: liberty
bind:
address: 127.0.0.1
port: 9696
database:
engine: mysql
host: 127.0.0.1
port: 3306
name: neutron
user: neutron
password: password
identity:
engine: keystone
region: RegionOne
host: 127.0.0.1
port: 35357
user: neutron
password: password
tenant: service
message_queue:
engine: rabbitmq
host: 127.0.0.1
port: 5672
user: openstack
password: password
virtual_host: '/openstack'
compute:
host: 127.0.0.1
region: RegionOne
user: nova
password: password
tenant: service

View File

@ -1,24 +0,0 @@
neutron:
gateway:
agent_mode: dvr_snat
backend:
engine: ml2
tenant_network_types: "flat,vxlan"
mechanism:
ovs:
driver: openvswitch
dvr: true
enabled: true
external_access: True
local_ip: 10.1.0.110
message_queue:
engine: rabbitmq
host: 172.16.10.254
password: workshop
port: 5672
user: openstack
virtual_host: /openstack
metadata:
host: 172.16.10.254
password: password
version: mitaka

View File

@ -1,24 +0,0 @@
neutron:
gateway:
agent_mode: legacy
backend:
engine: ml2
tenant_network_types: "flat,vxlan"
mechanism:
ovs:
driver: openvswitch
dvr: false
enabled: true
external_access: True
local_ip: 10.1.0.110
message_queue:
engine: rabbitmq
host: 172.16.10.254
password: workshop
port: 5672
user: openstack
virtual_host: /openstack
metadata:
host: 172.16.10.254
password: password
version: mitaka

View File

@ -1,163 +0,0 @@
#!/usr/bin/env bash

# Smoke-test driver for this salt formula: renders the formula once per
# pillar fixture in tests/pillar/ using a masterless salt-call.
set -e
[ -n "$DEBUG" ] && set -x

CURDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
METADATA=${CURDIR}/../metadata.yml
# Formula name taken from metadata.yml (python2-style print statement).
FORMULA_NAME=$(cat $METADATA | python -c "import sys,yaml; print yaml.load(sys.stdin)['name']")

## Overrideable parameters
PILLARDIR=${PILLARDIR:-${CURDIR}/pillar}
BUILDDIR=${BUILDDIR:-${CURDIR}/build}
VENV_DIR=${VENV_DIR:-${BUILDDIR}/virtualenv}
DEPSDIR=${BUILDDIR}/deps

SALT_FILE_DIR=${SALT_FILE_DIR:-${BUILDDIR}/file_root}
SALT_PILLAR_DIR=${SALT_PILLAR_DIR:-${BUILDDIR}/pillar_root}
SALT_CONFIG_DIR=${SALT_CONFIG_DIR:-${BUILDDIR}/salt}
SALT_CACHE_DIR=${SALT_CACHE_DIR:-${SALT_CONFIG_DIR}/cache}

# Masterless mode, exit code passed through from the rendered states.
SALT_OPTS="${SALT_OPTS} --retcode-passthrough --local -c ${SALT_CONFIG_DIR} --log-file=/dev/null"

# Optional version pin for the pip-installed salt in the virtualenv.
if [ "x${SALT_VERSION}" != "x" ]; then
    PIP_SALT_VERSION="==${SALT_VERSION}"
fi
## Functions
log_info() {
    # Informational message on stdout.
    printf '[INFO] %s\n' "$*"
}
log_err() {
    # Error message on stderr.
    printf '[ERROR] %s\n' "$*" 1>&2
}
setup_virtualenv() {
    # Create a throwaway virtualenv and pip-install salt into it (pinned via
    # PIP_SALT_VERSION when SALT_VERSION is set). Only used when salt-call is
    # not already on PATH.
    log_info "Setting up Python virtualenv"
    virtualenv $VENV_DIR
    source ${VENV_DIR}/bin/activate
    pip install salt${PIP_SALT_VERSION}
}
setup_pillar() {
    # Generate the pillar top.sls, mapping one minion id per pillar fixture.
    [ ! -d ${SALT_PILLAR_DIR} ] && mkdir -p ${SALT_PILLAR_DIR}
    echo "base:" > ${SALT_PILLAR_DIR}/top.sls
    # Iterate only *.sls fixtures — the same set setup_salt() and run() use.
    # The original bare `*` glob also swept up any stray non-pillar files
    # (README, editor backups) into top.sls.
    for pillar in ${PILLARDIR}/*.sls; do
        state_name=$(basename ${pillar%.sls})
        echo -e "  ${state_name}:\n    - ${state_name}" >> ${SALT_PILLAR_DIR}/top.sls
    done
}
setup_salt() {
    # Build the masterless salt environment: a file_roots top.sls mapping
    # every fixture id to this formula, plus a minimal minion config.
    [ ! -d ${SALT_FILE_DIR} ] && mkdir -p ${SALT_FILE_DIR}
    [ ! -d ${SALT_CONFIG_DIR} ] && mkdir -p ${SALT_CONFIG_DIR}
    [ ! -d ${SALT_CACHE_DIR} ] && mkdir -p ${SALT_CACHE_DIR}

    echo "base:" > ${SALT_FILE_DIR}/top.sls
    for pillar in ${PILLARDIR}/*.sls; do
        state_name=$(basename ${pillar%.sls})
        echo -e "  ${state_name}:\n    - ${FORMULA_NAME}" >> ${SALT_FILE_DIR}/top.sls
    done

    # Minion config: local file client; file roots are the generated top.sls
    # dir, the formula checkout itself, and any system-wide formulas.
    cat << EOF > ${SALT_CONFIG_DIR}/minion
file_client: local
cachedir: ${SALT_CACHE_DIR}
verify_env: False
minion_id_caching: False

file_roots:
  base:
  - ${SALT_FILE_DIR}
  - ${CURDIR}/..
  - /usr/share/salt-formulas/env

pillar_roots:
  base:
  - ${SALT_PILLAR_DIR}
  - ${PILLARDIR}
EOF
}
fetch_dependency() {
    # Clone one formula dependency given as "name:git-url", link it into the
    # salt file root, and recurse into its own metadata dependencies.
    dep_name="$(echo $1|cut -d : -f 1)"
    dep_source="$(echo $1|cut -d : -f 2-)"
    dep_root="${DEPSDIR}/$(basename $dep_source .git)"
    dep_metadata="${dep_root}/metadata.yml"

    # Skip if the formula is already installed system-wide or already cloned.
    [ -d /usr/share/salt-formulas/env/${dep_name} ] && log_info "Dependency $dep_name already present in system-wide salt env" && return 0
    [ -d $dep_root ] && log_info "Dependency $dep_name already fetched" && return 0

    log_info "Fetching dependency $dep_name"
    [ ! -d ${DEPSDIR} ] && mkdir -p ${DEPSDIR}
    git clone $dep_source ${DEPSDIR}/$(basename $dep_source .git)
    ln -s ${dep_root}/${dep_name} ${SALT_FILE_DIR}/${dep_name}

    # Recurse using the dependency's own metadata.yml.
    METADATA="${dep_metadata}" install_dependencies
}
install_dependencies() {
    # Read the `dependencies` list from ${METADATA} and fetch each entry.
    # No-op when the key is absent. The heredoc is a python2 snippet
    # (print statement); NOTE(review): open mode 'ro' is non-standard and
    # would fail under python3 — confirm the intended interpreter.
    grep -E "^dependencies:" ${METADATA} >/dev/null || return 0
    (python - | while read dep; do fetch_dependency "$dep"; done) << EOF
import sys,yaml
for dep in yaml.load(open('${METADATA}', 'ro'))['dependencies']:
    print '%s:%s' % (dep["name"], dep["source"])
EOF
}
clean() {
    # Remove the whole build tree; the trailing `|| exit 0` keeps the script
    # exiting successfully when there is nothing to clean (set -e is active).
    log_info "Cleaning up ${BUILDDIR}"
    [ -d ${BUILDDIR} ] && rm -rf ${BUILDDIR} || exit 0
}
salt_run() {
    # Run salt-call with the masterless test options, activating the local
    # virtualenv when one was created. Fixes the original ${VEN_DIR} typo:
    # the variable is VENV_DIR, so the guard never matched and a virtualenv
    # salt was never activated.
    [ -e ${VENV_DIR}/bin/activate ] && source ${VENV_DIR}/bin/activate
    salt-call ${SALT_OPTS} $*
}
prepare() {
    # Create the build tree and the masterless salt environment.
    # The original guard `[ -d ${BUILDDIR} ] && mkdir -p ${BUILDDIR}` was
    # inverted: it ran mkdir only when the directory already existed.
    # mkdir -p is idempotent, so run it unconditionally.
    mkdir -p ${BUILDDIR}
    which salt-call || setup_virtualenv
    setup_pillar
    setup_salt
    install_dependencies
}
run() {
    # Render the formula once per pillar fixture via state.show_sls; the
    # fixture filename (minus .sls) doubles as the minion id. Any render
    # failure logs and aborts with status 1.
    for pillar in ${PILLARDIR}/*.sls; do
        state_name=$(basename ${pillar%.sls})
        salt_run --id=${state_name} state.show_sls ${FORMULA_NAME} || (log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1)
    done
}
_atexit() {
    # Exit trap: capture the script's final status, disarm the trap, report
    # the overall result, and propagate the original status code.
    RETVAL=$?
    trap true INT TERM EXIT

    if [ $RETVAL -ne 0 ]; then
        log_err "Execution failed"
    else
        log_info "Execution successful"
    fi
    return $RETVAL
}
## Main
trap _atexit INT TERM EXIT

# Dispatch on the first argument: clean | prepare | run; with no (or an
# unknown) argument, do a full prepare + run.
case $1 in
    clean)
        clean
        ;;
    prepare)
        prepare
        ;;
    run)
        run
        ;;
    *)
        prepare
        run
        ;;
esac