Merge remote-tracking branch 'origin/master' into merge-branch

Change-Id: I9c29ad9564671ae5a2db35835bc4a30e75482cb2
Doug Wiegley 2015-08-30 21:23:46 -06:00
commit 2c5f44e1b3
421 changed files with 20068 additions and 8305 deletions


@ -15,7 +15,10 @@ The latest and most in-depth documentation on how to use Neutron is
available at: <http://docs.openstack.org>. This includes:
Neutron Administrator Guide
http://docs.openstack.org/admin-guide-cloud/content/ch_networking.html
http://docs.openstack.org/admin-guide-cloud/networking.html
Networking Guide
http://docs.openstack.org/networking-guide/
Neutron API Reference:
http://docs.openstack.org/api/openstack-network/2.0/content/


@ -309,6 +309,10 @@ current unit tests coverage by running::
$ ./run_tests.sh -c
Since the coverage command can only show unit test coverage, a coverage
document is maintained that shows test coverage per area of code in:
doc/source/devref/testing_coverage.rst.
Debugging
---------


@ -24,7 +24,8 @@ responsible for determining whether a command is safe to execute.
from __future__ import print_function
from six.moves import configparser as ConfigParser
import json
from oslo_serialization import jsonutils as json
import os
import select
import sys

devstack/lib/l2_agent (new file)

@ -0,0 +1,13 @@
function plugin_agent_add_l2_agent_extension {
local l2_agent_extension=$1
if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then
L2_AGENT_EXTENSIONS=$l2_agent_extension
elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then
L2_AGENT_EXTENSIONS+=",$l2_agent_extension"
fi
}
function configure_l2_agent {
iniset /$Q_PLUGIN_CONF_FILE agent extensions "$L2_AGENT_EXTENSIONS"
}

devstack/lib/ml2 (new file)

@ -0,0 +1,13 @@
function enable_ml2_extension_driver {
local extension_driver=$1
if [[ -z "$Q_ML2_PLUGIN_EXT_DRIVERS" ]]; then
Q_ML2_PLUGIN_EXT_DRIVERS=$extension_driver
elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension_driver}, ]]; then
Q_ML2_PLUGIN_EXT_DRIVERS+=",$extension_driver"
fi
}
function configure_qos_ml2 {
enable_ml2_extension_driver "qos"
}

devstack/lib/qos (new file)

@ -0,0 +1,20 @@
function configure_qos_service_plugin {
_neutron_service_plugin_class_add "qos"
}
function configure_qos_core_plugin {
configure_qos_$Q_PLUGIN
}
function configure_qos_l2_agent {
plugin_agent_add_l2_agent_extension "qos"
}
function configure_qos {
configure_qos_service_plugin
configure_qos_core_plugin
configure_qos_l2_agent
}

devstack/plugin.sh (new file)

@ -0,0 +1,18 @@
LIBDIR=$DEST/neutron/devstack/lib
source $LIBDIR/l2_agent
source $LIBDIR/ml2
source $LIBDIR/qos
if [[ "$1" == "stack" && "$2" == "install" ]]; then
if is_service_enabled q-qos; then
configure_qos
fi
fi
if [[ "$1" == "stack" && "$2" == "post-config" ]]; then
if is_service_enabled q-agt; then
configure_l2_agent
fi
fi

devstack/settings (new file)

@ -0,0 +1,3 @@
L2_AGENT_EXTENSIONS=${L2_AGENT_EXTENSIONS:-}
enable_service q-qos


@ -0,0 +1,34 @@
<h2>
Neutron Graphite Thumbnails - Click to see full size figure
</h2>
<table border="1">
<tr>
<td align="center">
Failure Percentage - Last 10 Days - DVR and Full Jobs<br>
<a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - DVR and Full Jobs&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-dvr-multinode-full.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-dvr-multinode-full.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-dvr-multinode-full%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-dvr.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-dvr.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-dvr%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-multinode-full.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-multinode-full.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-multinode-full%27%29,%27green%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-full.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-full.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-full%27%29,%27red%27%29">
<img src="http://graphite.openstack.org/render/?from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-dvr-multinode-full.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-dvr-multinode-full.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-dvr-multinode-full%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-dvr.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-dvr.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-dvr%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-multinode-full.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-multinode-full.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-multinode-full%27%29,%27green%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-full.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-full.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-full%27%29,%27red%27%29" width="400">
</a>
</td>
<td align="center">
Failure Percentage - Last 10 Days - Grenade, DSVM API/Functional/Fullstack<br>
<a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - Grenade, DSVM API/Functional/Fullstack&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-grenade-dsvm-neutron.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-grenade-dsvm-neutron.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-grenade-dsvm-neutron%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-neutron-dsvm-api.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-neutron-dsvm-api.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-neutron-dsvm-api%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-neutron-dsvm-functional.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-neutron-dsvm-functional.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-neutron-dsvm-functional%27%29,%27green%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-neutron-dsvm-fullstack.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-neutron-dsvm-fullstack.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-neutron-dsvm-fullstack%27%29,%27red%27%29">
<img src="http://graphite.openstack.org/render/?from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-grenade-dsvm-neutron.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-grenade-dsvm-neutron.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-grenade-dsvm-neutron%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-neutron-dsvm-api.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-neutron-dsvm-api.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-neutron-dsvm-api%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-neutron-dsvm-functional.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-neutron-dsvm-functional.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-neutron-dsvm-functional%27%29,%27green%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-neutron-dsvm-fullstack.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-neutron-dsvm-fullstack.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-neutron-dsvm-fullstack%27%29,%27red%27%29" width="400">
</a>
</td>
</tr>
<tr>
<td align="center">
Failure Percentage - Last 10 Days - Rally, LinuxBridge, LBaaS v1/v2<br>
<a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - Rally, LinuxBridge, LBaaS v1/v2&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-rally-dsvm-neutron-neutron.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-rally-dsvm-neutron-neutron.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-rally-dsvm-neutron-neutron%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-linuxbridge.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-linuxbridge.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-linuxbridge%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-neutron-lbaasv1-dsvm-api.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-neutron-lbaasv1-dsvm-api.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-neutron-lbaasv1-dsvm-api%27%29,%27green%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-api.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-api.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-neutron-lbaasv2-dsvm-api%27%29,%27red%27%29">
<img src="http://graphite.openstack.org/render/?from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-rally-dsvm-neutron-neutron.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-rally-dsvm-neutron-neutron.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-rally-dsvm-neutron-neutron%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-linuxbridge.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-linuxbridge.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-linuxbridge%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-neutron-lbaasv1-dsvm-api.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-neutron-lbaasv1-dsvm-api.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-neutron-lbaasv1-dsvm-api%27%29,%27green%27%29&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-api.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-api.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-neutron-lbaasv2-dsvm-api%27%29,%27red%27%29" width="400">
</a>
</td>
<td align="center">
Failure Percentage - Last 10 Days - Large Opts<br>
<a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - Large Opts&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-large-ops%27%29,%27orange%27%29">
<img src="http://graphite.openstack.org/render/?from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.FAILURE,sum%28stats.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.{SUCCESS,FAILURE}%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-large-ops%27%29,%27orange%27%29" width="400">
</a>
</td>
</tr>
</table>


@ -0,0 +1,313 @@
..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Convention for heading levels in Neutron devref:
======= Heading 0 (reserved for the title in a document)
------- Heading 1
~~~~~~~ Heading 2
+++++++ Heading 3
''''''' Heading 4
(Avoid deeper levels because they do not render well.)
Alembic Migrations
==================
Introduction
------------
The migrations in the alembic/versions directory contain the changes needed to
migrate from older Neutron releases to newer versions. A migration occurs by executing
a script that details the changes needed to upgrade the database. The migration
scripts are ordered so that multiple scripts can run sequentially to update the
database.
The Migration Wrapper
---------------------
The scripts are executed by Neutron's migration wrapper ``neutron-db-manage``
which uses the Alembic library to manage the migration. Pass the ``--help``
option to the wrapper for usage information.
The wrapper takes some options followed by some commands::
neutron-db-manage <options> <commands>
The wrapper needs to be provided with the database connection string, which is
usually provided in the ``neutron.conf`` configuration file in an installation.
The wrapper automatically reads from ``/etc/neutron/neutron.conf`` if it is
present. If the configuration is in a different location::
neutron-db-manage --config-file /path/to/neutron.conf <commands>
Multiple ``--config-file`` options can be passed if needed.
Instead of reading the DB connection from the configuration file(s) the
``--database-connection`` option can be used::
neutron-db-manage --database-connection mysql+pymysql://root:secret@127.0.0.1/neutron?charset=utf8 <commands>
For some commands the wrapper needs to know the entrypoint of the core plugin
for the installation. This can be read from the configuration file(s) or
specified using the ``--core_plugin`` option::
neutron-db-manage --core_plugin neutron.plugins.ml2.plugin.Ml2Plugin <commands>
When giving examples below of using the wrapper the options will not be shown.
It is assumed you will use the options that you need for your environment.
For new deployments you will start with an empty database. You then upgrade
to the latest database version via::
neutron-db-manage upgrade heads
For existing deployments the database will already be at some version. To
check the current database version::
neutron-db-manage current
After installing a new version of Neutron server, upgrading the database is
the same command::
neutron-db-manage upgrade heads
To create a script to run the migration offline::
neutron-db-manage upgrade heads --sql
To run the offline migration between specific migration versions::
neutron-db-manage upgrade <start version>:<end version> --sql
Upgrade the database incrementally::
neutron-db-manage upgrade --delta <# of revs>
**NOTE:** Database downgrade is not supported.
Migration Branches
------------------
Neutron makes use of alembic branches for two purposes.
1. Independent Sub-Project Tables
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Various `sub-projects <sub_projects.html>`_ can be installed with Neutron. Each
sub-project registers its own alembic branch which is responsible for migrating
the schemas of the tables owned by the sub-project.
The neutron-db-manage script detects which sub-projects have been installed by
enumerating the ``neutron.db.alembic_migrations`` entrypoints. For more details
see the `Entry Points section of Contributing extensions to Neutron
<contribute.html#entry-points>`_.
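As a rough sketch, the detection boils down to iterating over that entrypoint
group (the helper below is illustrative, not the actual neutron-db-manage
code)::

    import pkg_resources

    def installed_alembic_branches():
        # Each installed sub-project advertises its migrations through the
        # 'neutron.db.alembic_migrations' entrypoint group in its setup.cfg.
        return sorted(
            ep.name for ep in
            pkg_resources.iter_entry_points('neutron.db.alembic_migrations'))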
The neutron-db-manage script runs the given alembic command against all
installed sub-projects. (An exception is the ``revision`` command, which is
discussed in the `Developers`_ section below.)
2. Offline/Online Migrations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Since Liberty, Neutron maintains two parallel alembic migration branches.
The first one, called 'expand', is used to store expansion-only migration
rules. Those rules are strictly additive and can be applied while
neutron-server is running. Examples of additive database schema changes are:
creating a new table, adding a new table column, adding a new index, etc.
The second branch, called 'contract', is used to store those migration rules
that are not safe to apply while neutron-server is running. Those include:
column or table removal, moving data from one part of the database into another
(renaming a column, transforming single table into multiple, etc.), introducing
or modifying constraints, etc.
The intent of the split is to allow invoking those safe migrations from
'expand' branch while neutron-server is running, reducing downtime needed to
upgrade the service.
For more details, see the `Expand and Contract Scripts`_ section below.
Developers
----------
A database migration script is required when you submit a change to Neutron or
a sub-project that alters the database model definition. The migration script
is a special python file that includes code to upgrade the database to match
the changes in the model definition. Alembic will execute these scripts in
order to provide a linear migration path between revisions. The
neutron-db-manage command can be used to generate migration scripts for you to
complete. The operations in the template are those supported by the Alembic
migration library.
Script Auto-generation
~~~~~~~~~~~~~~~~~~~~~~
::
neutron-db-manage revision -m "description of revision" --autogenerate
This generates a prepopulated template with the changes needed to match the
database state with the models. You should inspect the autogenerated template
to ensure that the proper models have been altered.
In rare circumstances, you may want to start with an empty migration template
and manually author the changes necessary for an upgrade. You can create a
blank file via::
neutron-db-manage revision -m "description of revision"
The timeline on each alembic branch should remain linear and not interleave
with other branches, so that there is a clear path when upgrading. To verify
that alembic branches maintain linear timelines, you can run this command::
neutron-db-manage check_migration
If this command reports an error, you can troubleshoot by showing the migration
timelines using the ``history`` command::
neutron-db-manage history
Expand and Contract Scripts
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Under the obsolete "branchless" design, a migration script indicates a specific
"version" of the schema and includes directives that apply all necessary
changes to the database at once. If we look for example at the script
``2d2a8a565438_hierarchical_binding.py``, we will see::
# .../alembic_migrations/versions/2d2a8a565438_hierarchical_binding.py
def upgrade():
# .. inspection code ...
op.create_table(
'ml2_port_binding_levels',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('host', sa.String(length=255), nullable=False),
# ... more columns ...
)
for table in port_binding_tables:
op.execute((
"INSERT INTO ml2_port_binding_levels "
"SELECT port_id, host, 0 AS level, driver, segment AS segment_id "
"FROM %s "
"WHERE host <> '' "
"AND driver <> '';"
) % table)
op.drop_constraint(fk_name_dvr[0], 'ml2_dvr_port_bindings', 'foreignkey')
op.drop_column('ml2_dvr_port_bindings', 'cap_port_filter')
op.drop_column('ml2_dvr_port_bindings', 'segment')
op.drop_column('ml2_dvr_port_bindings', 'driver')
# ... more DROP instructions ...
The above script contains directives that are both under the "expand"
and "contract" categories, as well as some data migrations. the ``op.create_table``
directive is an "expand"; it may be run safely while the old version of the
application still runs, as the old code simply doesn't look for this table.
The ``op.drop_constraint`` and ``op.drop_column`` directives are
"contract" directives (the drop column moreso than the drop constraint); running
at least the ``op.drop_column`` directives means that the old version of the
application will fail, as it will attempt to access these columns which no longer
exist.
The data migrations in this script are adding new
rows to the newly added ``ml2_port_binding_levels`` table.
Under the new migration script directory structure, the above script would be
stated as two scripts; an "expand" and a "contract" script::
# expansion operations
# .../alembic_migrations/versions/liberty/expand/2bde560fc638_hierarchical_binding.py
def upgrade():
op.create_table(
'ml2_port_binding_levels',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('host', sa.String(length=255), nullable=False),
# ... more columns ...
)
# contraction operations
# .../alembic_migrations/versions/liberty/contract/4405aedc050e_hierarchical_binding.py
def upgrade():
for table in port_binding_tables:
op.execute((
"INSERT INTO ml2_port_binding_levels "
"SELECT port_id, host, 0 AS level, driver, segment AS segment_id "
"FROM %s "
"WHERE host <> '' "
"AND driver <> '';"
) % table)
op.drop_constraint(fk_name_dvr[0], 'ml2_dvr_port_bindings', 'foreignkey')
op.drop_column('ml2_dvr_port_bindings', 'cap_port_filter')
op.drop_column('ml2_dvr_port_bindings', 'segment')
op.drop_column('ml2_dvr_port_bindings', 'driver')
# ... more DROP instructions ...
The two scripts would be present in different subdirectories and also part of
entirely separate versioning streams. The "expand" operations are in the
"expand" script, and the "contract" operations are in the "contract" script.
For the time being, data migration rules also belong to the contract branch.
The expectation is that live data migrations will eventually move into
middleware that is aware of the different database schema elements to converge
on, but Neutron is not there yet.
Scripts that contain only expansion or contraction rules do not require a split
into two parts.
If a contraction script depends on a script from the expansion stream, the
following directive should be added to the contraction script::
depends_on = ('<expansion-revision>',)
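For example, the module-level identifiers of such a contract script might look
like this (the revision ids, table, and column below are invented for
illustration)::

    # .../versions/liberty/contract/<revision>_drop_bar_column.py
    from alembic import op

    # revision identifiers, used by Alembic.
    revision = '2a16083502f3'
    down_revision = '8675309a31f8'
    depends_on = ('1c844d1677f7',)  # the expand revision this script needs

    def upgrade():
        op.drop_column('foo_table', 'bar')  # a contract-only operation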
Applying database migration rules
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To apply just expansion rules, execute::
neutron-db-manage upgrade expand@head
After the first step is done, you can stop neutron-server and apply the
remaining non-expansive migration rules, if any::
neutron-db-manage upgrade contract@head
and finally, start your neutron-server again.
If you are not interested in applying safe migration rules while the service is
running, you can still upgrade the database the old way, by stopping the
service, and then applying all available rules::
neutron-db-manage upgrade head[s]
It will apply all the rules from both the expand and the contract branches, in
proper order.


@ -300,6 +300,14 @@ The output is:
FAQ
===
Can I use the callbacks registry to subscribe and notify non-core resources and events?
Short answer is yes. The callbacks module defines literals for what are considered core Neutron
resources and events. However, the ability to subscribe/notify is not limited to these as you
can use your own defined resources and/or events. Just make sure you use string literals, as
typos are common, and the registry does not provide any runtime validation. Therefore, make
sure you test your code!
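For example, a minimal sketch using made-up resource and event names (the
subscribe/notify API is the one described earlier in this document)::

    from neutron.callbacks import registry

    def my_callback(resource, event, trigger, **kwargs):
        # resource and event arrive as plain strings; nothing validates them.
        print('%s emitted %s (triggered by %s)' % (resource, event, trigger))

    registry.subscribe(my_callback, 'my-resource', 'my-event')
    registry.notify('my-resource', 'my-event', 'my-trigger', payload={'id': 42})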
What is the relationship between Callbacks and Taskflow?
There is no overlap between Callbacks and Taskflow, nor mutual exclusion; as a matter of fact they
@ -315,6 +323,16 @@ Is there any ordering guarantee during notifications?
notified. Priorities can be a future extension, if a use case arises that require enforced
ordering.
How is the notifying object expected to interact with the subscribing objects?
The ``notify`` method implements a one-way communication paradigm: the notifier sends a message
without expecting a response back (in other words, it fires and forgets). However, due to the nature
of Python, the payload can be mutated by the subscribing objects, and this can lead to unexpected
behavior of your code, if you assume that this is the intentional design. Bear in mind that
passing-by-value using deepcopy was not chosen, for efficiency reasons. Having said that, if you
intend for the notifier object to expect a response, then the notifier itself would need to act
as a subscriber.
Is the registry thread-safe?
Short answer is no: it is not safe to make mutations while callbacks are being called (more


@ -439,7 +439,7 @@ should take these steps to move the models for the tables out of tree.
third-party repo as is done in the neutron repo,
i.e. ``networking_foo/db/migration/alembic_migrations/versions/*.py``
#. Remove the models from the neutron repo.
#. Add the names of the removed tables to ``DRIVER_TABLES`` in
#. Add the names of the removed tables to ``REPO_FOO_TABLES`` in
``neutron/db/migration/alembic_migrations/external.py`` (this is used for
testing, see below).
@ -452,7 +452,7 @@ DB Model/Migration Testing
~~~~~~~~~~~~~~~~~~~~~~~~~~
Here is a `template functional test
<https://bugs.launchpad.net/neutron/+bug/1470678>`_ (TODO:Ann) third-party
<http://docs.openstack.org/developer/neutron/devref/template_model_sync_test.html>`_ third-party
maintainers can use to develop tests for model-vs-migration sync in their
repos. It is recommended that each third-party CI sets up such a test, and runs
it regularly against Neutron master.
@ -461,7 +461,7 @@ Liberty Steps
+++++++++++++
The model_sync test will be updated to ignore the models that have been moved
out of tree. A ``DRIVER_TABLES`` list will be maintained in
out of tree. ``REPO_FOO_TABLES`` lists will be maintained in
``neutron/db/migration/alembic_migrations/external.py``.
@ -520,9 +520,11 @@ the installer to configure this item in the ``[default]`` section. For example::
interface_driver = networking_foo.agent.linux.interface.FooInterfaceDriver
**ToDo: Interface Driver port bindings.**
These are currently defined by the ``VIF_TYPES`` in
``neutron/extensions/portbindings.py``. We could make this config-driven
for agents. For Nova, selecting the VIF driver can be done outside of
``VIF_TYPE_*`` constants in ``neutron/extensions/portbindings.py`` should be
moved from neutron core to the repositories where their drivers are
implemented. We need to provide some config or hook mechanism for VIF types
to be registered by external interface drivers. For Nova, selecting the VIF
driver can be done outside of
Neutron (using the new `os-vif python library
<https://review.openstack.org/193668>`_?). Armando and Akihiro to discuss.


@ -23,150 +23,11 @@ should also be added in model. If default value in database is not needed,
business logic.
How we manage database migration rules
--------------------------------------
Database migrations
-------------------
Since Liberty, Neutron maintains two parallel alembic migration branches.
The first one, called 'expand', is used to store expansion-only migration
rules. Those rules are strictly additive and can be applied while
neutron-server is running. Examples of additive database schema changes are:
creating a new table, adding a new table column, adding a new index, etc.
The second branch, called 'contract', is used to store those migration rules
that are not safe to apply while neutron-server is running. Those include:
column or table removal, moving data from one part of the database into another
(renaming a column, transforming single table into multiple, etc.), introducing
or modifying constraints, etc.
The intent of the split is to allow invoking those safe migrations from
'expand' branch while neutron-server is running, reducing downtime needed to
upgrade the service.
To apply just expansion rules, execute:
- neutron-db-manage upgrade liberty_expand@head
After the first step is done, you can stop neutron-server and apply the
remaining non-expansive migration rules, if any:
- neutron-db-manage upgrade liberty_contract@head
and finally, start your neutron-server again.
If you are not interested in applying safe migration rules while the service is
running, you can still upgrade the database the old way, by stopping the
service, and then applying all available rules:
- neutron-db-manage upgrade head[s]
It will apply all the rules from both the expand and the contract branches, in
proper order.
Expand and Contract Scripts
---------------------------
Under the obsolete "branchless" design, a migration script indicates a specific
"version" of the schema and includes directives that apply all necessary
changes to the database at once. If we look for example at the script
``2d2a8a565438_hierarchical_binding.py``, we will see::
# .../alembic_migrations/versions/2d2a8a565438_hierarchical_binding.py
def upgrade():
# .. inspection code ...
op.create_table(
'ml2_port_binding_levels',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('host', sa.String(length=255), nullable=False),
# ... more columns ...
)
for table in port_binding_tables:
op.execute((
"INSERT INTO ml2_port_binding_levels "
"SELECT port_id, host, 0 AS level, driver, segment AS segment_id "
"FROM %s "
"WHERE host <> '' "
"AND driver <> '';"
) % table)
op.drop_constraint(fk_name_dvr[0], 'ml2_dvr_port_bindings', 'foreignkey')
op.drop_column('ml2_dvr_port_bindings', 'cap_port_filter')
op.drop_column('ml2_dvr_port_bindings', 'segment')
op.drop_column('ml2_dvr_port_bindings', 'driver')
# ... more DROP instructions ...
The above script contains directives that are both under the "expand"
and "contract" categories, as well as some data migrations. the ``op.create_table``
directive is an "expand"; it may be run safely while the old version of the
application still runs, as the old code simply doesn't look for this table.
The ``op.drop_constraint`` and ``op.drop_column`` directives are
"contract" directives (the drop column moreso than the drop constraint); running
at least the ``op.drop_column`` directives means that the old version of the
application will fail, as it will attempt to access these columns which no longer
exist.
The data migrations in this script are adding new
rows to the newly added ``ml2_port_binding_levels`` table.
Under the new migration script directory structure, the above script would be
stated as two scripts; an "expand" and a "contract" script::
# expansion operations
# .../alembic_migrations/versions/liberty/expand/2bde560fc638_hierarchical_binding.py
def upgrade():
op.create_table(
'ml2_port_binding_levels',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('host', sa.String(length=255), nullable=False),
# ... more columns ...
)
# contraction operations
# .../alembic_migrations/versions/liberty/contract/4405aedc050e_hierarchical_binding.py
def upgrade():
for table in port_binding_tables:
op.execute((
"INSERT INTO ml2_port_binding_levels "
"SELECT port_id, host, 0 AS level, driver, segment AS segment_id "
"FROM %s "
"WHERE host <> '' "
"AND driver <> '';"
) % table)
op.drop_constraint(fk_name_dvr[0], 'ml2_dvr_port_bindings', 'foreignkey')
op.drop_column('ml2_dvr_port_bindings', 'cap_port_filter')
op.drop_column('ml2_dvr_port_bindings', 'segment')
op.drop_column('ml2_dvr_port_bindings', 'driver')
# ... more DROP instructions ...
The two scripts would be present in different subdirectories and also part of
entirely separate versioning streams. The "expand" operations are in the
"expand" script, and the "contract" operations are in the "contract" script.
For the time being, data migration rules also belong to the contract branch.
The expectation is that live data migrations will eventually move into
middleware that is aware of the different database schema elements to converge
on, but Neutron is not there yet.
Scripts that contain only expansion or contraction rules do not require a split
into two parts.
If a contraction script depends on a script from the expansion stream, the
following directive should be added to the contraction script::
depends_on = ('<expansion-revision>',)
For details on the neutron-db-manage wrapper and alembic migrations, see
`Alembic Migrations <alembic_migrations.html>`_.
Tests to verify that database migrations and models are in sync


@ -28,20 +28,23 @@ Why?
----
The idea behind "fullstack" testing is to fill a gap between unit + functional
tests and Tempest. Tempest tests are expensive to run, difficult to run in
a multi node environment, and are often very high level and provide little
indication to what is wrong, only that something is wrong. Developers further
benefit from full stack testing as it can sufficiently simulate a real
environment and provide a rapidly reproducible way to verify code as you're
still writing it.
tests and Tempest. Tempest tests are expensive to run, and operate only
through the REST API, so the only explanation of what went wrong is what
gets reported to an end user via the REST API, which is often too high level.
Additionally, Tempest requires an OpenStack deployment to be run against, which
can be difficult to configure and set up. Full stack testing addresses
these issues by taking care of the deployment itself, according to the topology
that the test requires. Developers further benefit from full stack testing as
it can sufficiently simulate a real environment and provide a rapidly
reproducible way to verify code as you're still writing it.
How?
----
Full stack tests set up their own Neutron processes (Server & agents). They
assume a working Rabbit and MySQL server before the run starts. Instructions
on how to run fullstack tests on a VM are available at TESTING.rst:
http://git.openstack.org/cgit/openstack/neutron/tree/TESTING.rst
on how to run fullstack tests on a VM are available in our
`TESTING.rst. <development.environment.html#id2>`_
Each test defines its own topology (what and how many servers and agents should
be running).
@ -52,10 +55,10 @@ through the API and then assert that a namespace was created for it.
Full stack tests run in the Neutron tree with Neutron resources alone. You
may use the Neutron API (The Neutron server is set to NOAUTH so that Keystone
is out of the picture). instances may be simulated with a helper class that
contains a container-like object in its own namespace and IP address. It has
helper methods to send different kinds of traffic. The "instance" may be
connected to br-int or br-ex, to simulate internal or external traffic.
is out of the picture). VMs may be simulated with a container-like class:
neutron.tests.fullstack.resources.machine.FakeFullstackMachine.
An example of its usage may be found at:
neutron/tests/fullstack/test_connectivity.py.
Full stack testing can simulate multi node testing by starting an agent
multiple times. Specifically, each node would have its own copy of the
@ -63,7 +66,7 @@ OVS/DHCP/L3 agents, all configured with the same "host" value. Each OVS agent
is connected to its own pair of br-int/br-ex, and those bridges are then
interconnected.
.. image:: images/fullstack-multinode-simulation.png
.. image:: images/fullstack_multinode_simulation.png
When?
-----

(Binary image changed: images/fullstack-multinode-simulation.png, 29 KiB, replaced by images/fullstack_multinode_simulation.png, 31 KiB; files not shown.)


@ -43,7 +43,9 @@ Programming HowTos and Tutorials
contribute
neutron_api
sub_projects
sub_project_guidelines
client_command_extensions
alembic_migrations
Neutron Internals
@ -53,12 +55,15 @@ Neutron Internals
services_and_agents
api_layer
quota
api_extensions
plugin-api
db_layer
rpc_api
rpc_callbacks
layer3
l2_agents
quality_of_service
advanced_services
oslo-incubator
callbacks
@ -70,6 +75,8 @@ Testing
:maxdepth: 3
fullstack_testing
testing_coverage
template_model_sync_test
Module Reference
----------------


@ -50,7 +50,7 @@ Neutron logical network setup
Neutron logical router setup
----------------------------
* http://docs.openstack.org/admin-guide-cloud/content/ch_networking.html#under_the_hood_openvswitch_scenario1_network
* http://docs.openstack.org/networking-guide/scenario_legacy_ovs.html
::
@ -147,7 +147,7 @@ Neutron Routers are realized in OpenVSwitch
Finding the router in ip/ipconfig
---------------------------------
* http://docs.openstack.org/admin-guide-cloud/content/ch_networking.html
* http://docs.openstack.org/admin-guide-cloud/networking.html
The neutron-l3-agent uses the Linux IP stack and iptables to perform L3 forwarding and NAT.
In order to support multiple routers with potentially overlapping IP addresses, neutron-l3-agent
@ -189,11 +189,11 @@ For example::
Provider Networking
-------------------
Neutron can also be configured to create `provider networks <http://docs.openstack.org/admin-guide-cloud/content/ch_networking.html#provider_terminology>`_
Neutron can also be configured to create `provider networks <http://docs.openstack.org/admin-guide-cloud/networking_adv-features.html#provider-networks>`_
Further Reading
---------------
* `Packet Pushers - Neutron Network Implementation on Linux <http://packetpushers.net/openstack-neutron-network-implementation-in-linux/>`_
* `OpenStack Cloud Administrator Guide <http://docs.openstack.org/admin-guide-cloud/content/ch_networking.html>`_
* `Packet Pushers - Neutron Network Implementation on Linux <http://packetpushers.net/openstack-quantum-network-implementation-in-linux/>`_
* `OpenStack Cloud Administrator Guide <http://docs.openstack.org/admin-guide-cloud/networking.html>`_
* `Neutron - Layer 3 API extension usage guide <http://docs.openstack.org/api/openstack-network/2.0/content/router_ext.html>`_
* `Darragh O'Reilly - The Quantum L3 router and floating IPs <http://techbackground.blogspot.com/2013/05/the-quantum-l3-router-and-floating-ips.html>`_


@ -6,8 +6,8 @@ This Agent uses the `Linux Bridge
<http://www.linuxfoundation.org/collaborate/workgroups/networking/bridge>`_ to
provide L2 connectivity for VM instances running on the compute node to the
public network. A graphical illustration of the deployment can be found in
`OpenStack Admin Guide Linux Bridge
<http://docs.openstack.org/admin-guide-cloud/content/under_the_hood_linuxbridge.html>`_
`Networking Guide
<http://docs.openstack.org/networking-guide/scenario_legacy_lb.html>`_
In most common deployments, there is a compute and a network node. On both the
compute and the network node, the Linux Bridge Agent will manage virtual


@ -26,7 +26,6 @@ GRE Tunneling is documented in depth in the `Networking in too much
detail <http://openstack.redhat.com/Networking_in_too_much_detail>`_
by RedHat.
VXLAN Tunnels
-------------
@ -35,6 +34,16 @@ at layer 2 into a UDP header.
More information can be found in `The VXLAN wiki page.
<http://en.wikipedia.org/wiki/Virtual_Extensible_LAN>`_
Geneve Tunnels
--------------
Geneve uses UDP as its transport protocol and has a dynamic header size,
thanks to extensible option headers.
Note that it is currently supported only by newer kernels
(kernel >= 3.18) together with OVS version >= 2.4.
More information can be found in the `Geneve RFC document.
<https://tools.ietf.org/html/draft-ietf-nvo3-geneve-00>`_
Bridge Management
-----------------
@ -71,6 +80,7 @@ future to support existing VLAN-tagged traffic (coming from NFV VMs
for instance) and/or to deal with potential QinQ support natively
available in the Open vSwitch.
Further Reading
---------------


@ -0,0 +1,357 @@
==================
Quality of Service
==================
The Quality of Service advanced service is designed as a service plugin. The
service is decoupled from the rest of the Neutron code on multiple levels (see
below).
QoS extends core resources (ports, networks) without using mixins inherited
from plugins, but through an ml2 extension driver.
Details about the DB models, API extension, and use cases can be found here:
`qos spec <http://specs.openstack.org/openstack/neutron-specs/specs/liberty/qos-api-extension.html>`_.
Service side design
===================
* neutron.extensions.qos:
base extension + API controller definition. Note that rules are subattributes
of policies and hence embedded into their URIs.
* neutron.services.qos.qos_plugin:
QoSPlugin, service plugin that implements 'qos' extension, receiving and
handling API calls to create/modify policies and rules.
* neutron.services.qos.notification_drivers.manager:
the manager that passes object notifications down to every enabled
notification driver.
* neutron.services.qos.notification_drivers.qos_base:
the interface class for pluggable notification drivers that are used to
update backends about new {create, update, delete} events on any rule or
policy change.
* neutron.services.qos.notification_drivers.message_queue:
MQ-based reference notification driver which updates agents via messaging
bus, using `RPC callbacks <rpc_callbacks.html>`_.
* neutron.core_extensions.base:
Contains an interface class to implement core resource (port/network)
extensions. Core resource extensions are then easily integrated into
interested plugins. We may need to have a core resource extension manager
that would utilize those extensions, to avoid plugin modifications for every
new core resource extension.
* neutron.core_extensions.qos:
Contains QoS core resource extension that conforms to the interface described
above.
* neutron.plugins.ml2.extensions.qos:
Contains ml2 extension driver that handles core resource updates by reusing
the core_extensions.qos module mentioned above. In the future, we would like
to see a plugin-agnostic core resource extension manager that could be
integrated into other plugins with ease.
Supported QoS rule types
------------------------
Any plugin or Ml2 mechanism driver can claim support for some QoS rule types by
providing a plugin/driver class property called 'supported_qos_rule_types' that
should return a list of strings that correspond to QoS rule types (for the list
of all rule types, see: neutron.extensions.qos.VALID_RULE_TYPES).
In the simplest case, the property can be represented by a simple Python
list defined on the class.
For the Ml2 plugin, the list of supported QoS rule types is defined as a common
subset of rules supported by all active mechanism drivers.
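For instance, a mechanism driver that only supports bandwidth limiting might
declare (a sketch; the driver class is hypothetical and the exact rule type
string is an assumption, see VALID_RULE_TYPES for the canonical values)::

    class FooMechanismDriver(object):

        # Advertised to the Ml2 plugin, which intersects this list across
        # all active mechanism drivers.
        supported_qos_rule_types = ['bandwidth_limit']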
Note: the list of supported rule types reported by the core plugin is not
enforced when accessing QoS rule resources. This is mostly because otherwise we
would not be able to create any rules while at least one ml2 driver in the gate
lacks support for QoS (at the moment of writing, linuxbridge is such a driver).
Database models
---------------
QoS design defines the following two conceptual resources to apply QoS rules
for a port or a network:
* QoS policy
* QoS rule (type specific)
Each QoS policy contains zero or more QoS rules. A policy is then applied to a
network or a port, making all rules of the policy applied to the corresponding
Neutron resource (for a network, applying a policy means that the policy will
be applied to all ports that belong to it).
From the database point of view, the following objects are defined in the schema:
* QosPolicy: directly maps to the conceptual policy resource.
* QosNetworkPolicyBinding, QosPortPolicyBinding: defines attachment between a
Neutron resource and a QoS policy.
* QosBandwidthLimitRule: defines the only rule type available at the moment.
All database models are defined under:
* neutron.db.qos.models
QoS versioned objects
---------------------
There is a long history of passing database dictionaries directly into the business
logic of Neutron. This path is not the one we wanted to take for the QoS effort, so
we've also introduced a new objects middleware to encapsulate the database logic
from the rest of the Neutron code that works with QoS resources. For this, we've
adopted oslo.versionedobjects library and introduced a new NeutronObject class
that is a base for all other objects that will belong to the middle layer.
There is an expectation that Neutron will evolve into using objects for all
resources it handles, though that part was obviously out of scope for the QoS
effort.
Every NeutronObject supports the following operations:
* get_by_id: returns specific object that is represented by the id passed as an
argument.
* get_objects: returns all objects of the type, potentially with a filter
applied.
* create/update/delete: usual persistence operations.
Base object class is defined in:
* neutron.objects.base
For QoS, new neutron objects were implemented:
* QosPolicy: directly maps to the conceptual policy resource, as defined above.
* QosBandwidthLimitRule: class that represents the only rule type supported by
initial QoS design.
Those are defined in:
* neutron.objects.qos.policy
* neutron.objects.qos.rule
For QosPolicy neutron object, the following public methods were implemented:
* get_network_policy/get_port_policy: returns a policy object that is attached
to the corresponding Neutron resource.
* attach_network/attach_port: attach a policy to the corresponding Neutron
resource.
* detach_network/detach_port: detach a policy from the corresponding Neutron
resource.
In addition to the fields that belong to the QoS policy database object itself,
synthetic fields were added to the object that represent lists of rules that
belong to the policy. To get a list of all rules for a specific policy, a
consumer of the object can just access the corresponding attribute via:
* policy.rules
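A consumer might do something like the following (a sketch; ``context`` and
``policy_id`` are placeholders for a real admin context and policy UUID)::

    from neutron.objects.qos import policy as policy_object

    policy = policy_object.QosPolicy.get_by_id(context, policy_id)
    for rule in policy.rules:  # synthetic field, loaded with the policy
        print(rule.id)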
Implementation is done in a way that will allow adding a new rule list field
with little or no modifications in the policy object itself. This is achieved
by smart introspection of existing available rule object definitions and
automatic definition of those fields on the policy class.
Note that rules are loaded in a non-lazy way, meaning they are all fetched from
the database on policy fetch.
For Qos<type>Rule objects, an extendable approach was taken to allow easy
addition of objects for new rule types. To accommodate this, fields common to
all types are put into a base class called QosRule that is then inherited into
type-specific rule implementations that, ideally, only define additional fields
and some other minor things.
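In other words, adding a rule type is expected to look roughly like this (the
class name and field are invented; only the inheritance pattern comes from the
text above, and a real rule type would also be registered with the
oslo.versionedobjects registry, as the next paragraph explains)::

    from oslo_versionedobjects import fields as obj_fields

    from neutron.objects.qos import rule

    class QosFooRule(rule.QosRule):
        # Only the type-specific field is declared here; common fields
        # are inherited from the QosRule base class.
        fields = {'foo_limit': obj_fields.IntegerField()}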
Note that the QosRule base class is not registered with oslo.versionedobjects
registry, because it's not expected that 'generic' rules should be
instantiated (and to suggest just that, the base rule class is marked as ABC).
QoS objects rely on some primitive database API functions that are added in:
* neutron.db.api: those can be reused to fetch other models that do not have
corresponding versioned objects yet, if needed.
* neutron.db.qos.api: contains database functions that are specific to QoS
models.
RPC communication
-----------------
Details on RPC communication implemented in reference backend driver are
discussed in `a separate page <rpc_callbacks.html>`_.
One thing that should be mentioned here explicitly is that RPC callback
endpoints communicate using real versioned objects (as defined by serialization
for oslo.versionedobjects library), not vague json dictionaries. Meaning,
oslo.versionedobjects are on the wire and not just used internally inside a
component.
One more thing to note is that though the RPC interface relies on versioned
objects, it does not yet rely on the versioning features the oslo.versionedobjects
library provides. This is because Liberty is the first release where we start
using the RPC interface, so we have no way to get different versions in a
cluster. That said, the versioning strategy for QoS is thought through and
described in `the separate page <rpc_callbacks.html>`_.
There is an expectation that after RPC callbacks are introduced in Neutron, we
will be able to migrate propagation from server to agents for other resources
(e.g. security groups) to the new mechanism. This will need to wait until those
resources get proper NeutronObject implementations.
The flow of updates is as follows:
* if a port that is bound to the agent is attached to a QoS policy, then the ML2
plugin detects the change by relying on the ML2 QoS extension driver, and
notifies the agent about a port change. The agent proceeds with the
notification by calling get_device_details() and getting the new port dict
that contains a new qos_policy_id. Each device details dict is passed into l2
agent extension manager that passes it down into every enabled extension,
including QoS. QoS extension sees that there is a new unknown QoS policy for
a port, so it uses ResourcesPullRpcApi to fetch the current state of the
policy (with all the rules included) from the server. After that, the QoS
extension applies the rules by calling into QoS driver that corresponds to
the agent.
* on an existing QoS policy update (this includes any policy or rule change),
the server pushes the new policy object state through the ResourcesPushRpcApi
interface. The interface fans out the serialized (dehydrated) object to any
agent that is listening for QoS policy updates. If an agent has seen the
policy before (it is attached to one of the ports it maintains), then it goes
on to apply the updates to the port. Otherwise, the agent silently ignores
the update.
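A condensed sketch of the pull half of this flow (module and method names
follow the description on this page; treat the exact signatures as
assumptions)::

    from neutron.api.rpc.callbacks import resources
    from neutron.api.rpc.handlers import resources_rpc

    class QosExtensionSketch(object):
        def __init__(self):
            self.resource_rpc = resources_rpc.ResourcesPullRpcApi()

        def handle_port(self, context, port):
            policy_id = port.get('qos_policy_id')
            if policy_id is None:
                return
            # Fetch the current policy state, rules included, from the server.
            policy = self.resource_rpc.pull(
                context, resources.QOS_POLICY, policy_id)
            # ... apply policy.rules through the backend QoS driver ...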
Agent side design
=================
To ease code reusability between agents and to avoid the need to patch an agent
for each new core resource extension, pluggable L2 agent extensions were
introduced. They can be especially interesting to third parties that don't want
to maintain their code in Neutron tree.
Extensions are meant to receive handle_port events, and do whatever they need
with them.
* neutron.agent.l2.agent_extension:
This module defines an abstract extension interface.
* neutron.agent.l2.extensions.manager:
This module contains a manager that allows registering multiple extensions,
and passes handle_port events down to all enabled extensions.
* neutron.agent.l2.extensions.qos
defines QoS L2 agent extension. It receives handle_port and delete_port
events and passes them down into QoS agent backend driver (see below). The
file also defines the QosAgentDriver interface. Note: each backend implements
its own driver. The driver handles low level interaction with the underlying
networking technology, while the QoS extension handles operations that are
common to all agents.
Agent backends
--------------
At the moment, QoS is supported by Open vSwitch and SR-IOV ml2 drivers.
Each agent backend defines a QoS driver that implements the QosAgentDriver
interface:
* Open vSwitch (QosOVSAgentDriver);
* SR-IOV (QosSRIOVAgentDriver).
Open vSwitch
~~~~~~~~~~~~
Open vSwitch implementation relies on the new ovs_lib OVSBridge functions:
* get_egress_bw_limit_for_port
* create_egress_bw_limit_for_port
* delete_egress_bw_limit_for_port
An egress bandwidth limit is effectively configured on the port by setting
the port Interface parameters ingress_policing_rate and
ingress_policing_burst.
That approach is less flexible than linux-htb, Queues and OvS QoS profiles,
which we may explore in the future, but which will need to be used in
combination with openflow rules.
SR-IOV
~~~~~~
SR-IOV bandwidth limit implementation relies on the new pci_lib function:
* set_vf_max_rate
As the name of the function suggests, the limit is applied on a Virtual
Function (VF).
The ip link interface has the following limitation for bandwidth limits: it uses
Mbps as the unit of bandwidth measurement, not kbps, and does not support
floating point numbers. So if the limit is set to something less than 1000 kbps,
it is set to 1 Mbps. If the limit is not a multiple of 1000 kbps, the effective
limit is rounded to the nearest integer Mbps value.
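The resulting behaviour can be illustrated with a small helper (hypothetical,
not the actual pci_lib code)::

    def kbps_to_ip_link_mbps(limit_kbps):
        # Anything below 1000 kbps collapses to the 1 Mbps minimum.
        if limit_kbps < 1000:
            return 1
        # Otherwise round to the nearest whole Mbps.
        return int(round(limit_kbps / 1000.0))

    assert kbps_to_ip_link_mbps(800) == 1
    assert kbps_to_ip_link_mbps(2300) == 2   # 2.3 Mbps rounds down
    assert kbps_to_ip_link_mbps(2700) == 3   # 2.7 Mbps rounds up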
Configuration
=============
To enable the service, the following steps should be followed:
On server side:
* enable qos service in service_plugins;
* set the needed notification_drivers in [qos] section (message_queue is the default);
* for ml2, add 'qos' to extension_drivers in [ml2] section.
On agent side (OVS):
* add 'qos' to extensions in [agent] section.
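Putting the above together, the relevant configuration fragments might look
like this (a sketch; file names are the usual defaults for an ml2/OVS
deployment)::

    # neutron.conf (server)
    [DEFAULT]
    service_plugins = qos

    [qos]
    notification_drivers = message_queue

    # ml2_conf.ini (server)
    [ml2]
    extension_drivers = qos

    # Open vSwitch agent configuration (agent side)
    [agent]
    extensions = qos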
Testing strategy
================
All the code added or extended as part of the effort got reasonable unit test
coverage.
Neutron objects
---------------
Base unit test classes to validate neutron objects were implemented in a way
that allows code reuse when introducing a new object type.
There are two test classes that are utilized for that:
* BaseObjectIfaceTestCase: class to validate basic object operations (mostly
CRUD) with database layer isolated.
* BaseDbObjectTestCase: class to validate the same operations with models in
place and database layer unmocked.
Every new object implemented on top of one of those classes is expected to
either inherit existing test cases as is, or reimplement it, if it makes sense
in terms of how those objects are implemented. Specific test classes can
obviously extend the set of test cases as they see fit (e.g. you may need to
define new test cases for those additional methods that you may add to your
object implementations on top of base semantics common to all neutron objects).
Functional tests
----------------
Additions to ovs_lib to set bandwidth limits on ports are covered in:
* neutron.tests.functional.agent.test_ovs_lib
API tests
---------
API tests for basic CRUD operations for ports, networks, policies, and rules were added in:
* neutron.tests.api.test_qos

doc/source/devref/quota.rst (new file)

@ -0,0 +1,332 @@
================================
Quota Management and Enforcement
================================
Most resources exposed by the Neutron API are subject to quota limits.
The Neutron API exposes an extension for managing such quotas. Quota limits are
enforced at the API layer, before the request is dispatched to the plugin.
Default values for quota limits are specified in neutron.conf. Admin users
can override those default values on a per-tenant basis. Limits are stored
in the Neutron database; if no limit is found for a given resource and tenant,
then the default value for such resource is used.
Configuration-based quota management, where every tenant gets the same quota
limit specified in the configuration file, has been deprecated as of the
Liberty release.
Please note that Neutron supports neither per-user quota limits nor quota
management for hierarchical multitenancy (as a matter of fact Neutron does not
support hierarchical multitenancy at all). Also, quota limits are currently not
enforced on RPC interfaces listening on the AMQP bus.
Plugin and ML2 drivers are not supposed to enforce quotas for resources they
manage. However, the subnet_allocation [#]_ extension is an exception and will
be discussed below.
The quota management and enforcement mechanisms discussed here apply to every
resource which has been registered with the Quota engine, regardless of
whether such resource belongs to the core Neutron API or one of its extensions.
High Level View
---------------
There are two main components in the Neutron quota system:
* The Quota API extension;
* The Quota Engine.
Both components rely on a quota driver. The neutron codebase currently defines
two quota drivers:
* neutron.db.quota.driver.DbQuotaDriver
* neutron.quota.ConfDriver
The latter driver is however deprecated.
The Quota API extension handles quota management, whereas the Quota Engine
component handles quota enforcement. This API extension is loaded like any
other extension. For this reason plugins must explicitly support it by including
"quotas" in the support_extension_aliases attribute.
In the Quota API, simple CRUD operations are used for managing tenant quotas.
Please note that the current behaviour when deleting a tenant quota is to reset
quota limits for that tenant to configuration defaults. The API
extension does not validate the tenant identifier with the identity service.
Performing quota enforcement is the responsibility of the Quota Engine.
RESTful API controllers, before sending a request to the plugin, try to obtain
a reservation from the quota engine for the resources specified in the client
request. If the reservation is successful, then it proceeds to dispatch the
operation to the plugin.
For a reservation to be successful, the total amount of resources requested,
plus the total amount of resources reserved, plus the total amount of resources
already stored in the database should not exceed the tenant's quota limit.
Finally, both quota management and enforcement rely on a "quota driver" [#]_,
whose task is basically to perform database operations.
Quota Management
----------------
The quota management component is fairly straightforward.
However, unlike the vast majority of Neutron extensions, it uses its own
controller class [#]_.
This class does not implement the POST operation. List, get, update, and
delete operations are implemented by the usual index, show, update and
delete methods. These methods simply call into the quota driver for either
fetching tenant quotas or updating them.
The _update_attributes method is called only once in the controller lifetime.
This method dynamically updates Neutron's resource attribute map [#]_ so that
an attribute is added for every resource managed by the quota engine.
Request authorisation is performed in this controller, and only 'admin' users
are allowed to modify quotas for tenants. As the neutron policy engine is not
used, it is not possible to configure which users should be allowed to manage
quotas using policy.json.
The driver operations dealing with quota management are:
* delete_tenant_quota, which simply removes all entries from the 'quotas'
table for a given tenant identifier;
* update_quota_limit, which adds or updates an entry in the 'quotas' table for
  a given tenant identifier and a given resource name;
* _get_quotas, which fetches limits for a set of resources and a given tenant
  identifier;
* _get_all_quotas, which behaves like _get_quotas, but for all tenants.
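As a sketch, managing a tenant's quotas through the DB driver might look like
this (illustrative only; the request context and tenant identifier are
placeholders, and the exact signatures are assumed from the descriptions
above)::

    from neutron.db.quota import driver

    quota_driver = driver.DbQuotaDriver()
    # raise the 'port' limit for one tenant (context and tenant_id are
    # placeholders supplied by the API controller in real code)
    quota_driver.update_quota_limit(context, tenant_id, 'port', 50)
    # reset that tenant's quotas back to the configuration defaults
    quota_driver.delete_tenant_quota(context, tenant_id)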
Resource Usage Info
-------------------
Neutron has two ways of tracking resource usage info:
* CountableResource, where resource usage is calculated every time quota
  limits are enforced, by counting rows in the resource table and reservations
  for that resource.
* TrackedResource, which instead relies on a specific table tracking usage
  data, and performs explicit counting only when the data in this table is
  not in sync with actual used and reserved resources.

Another difference between CountableResource and TrackedResource is that the
former invokes a plugin method to count resources. CountableResource should
therefore be employed for plugins which do not leverage the Neutron database.
The actual class that the Neutron quota engine will use is determined by the
track_quota_usage variable in the quota configuration section. If True,
TrackedResource instances will be created, otherwise the quota engine will
use CountableResource instances.
Resource creation is performed by the create_resource_instance factory method
in the neutron.quota.resource module.
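The choice is thus driven by configuration; a minimal neutron.conf sketch
(the default shown is an assumption)::

    [quota]
    # If True, TrackedResource instances are created for registered
    # resources; otherwise CountableResource instances are used
    track_quota_usage = True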
From a performance perspective, having a table tracking resource usage
has some advantages, albeit not fundamental ones. Indeed the time required for
executing queries to explicitly count objects will increase with the number of
records in the table. Using TrackedResource, on the other hand, will fetch a
single record, but has the drawback of having to execute an UPDATE statement
once the operation is completed.
Nevertheless, CountableResource instances do not simply perform a SELECT query
on the relevant table for a resource, but invoke a plugin method, which might
execute several statements and sometimes even interact with the backend
before returning.
Resource usage tracking also becomes important for operational correctness
when coupled with the concept of resource reservation, discussed in another
section of this chapter.
Tracking quota usage is not as simple as updating a counter every time
resources are created or deleted.
Indeed a quota-limited resource in Neutron can be created in several ways.
While a RESTful API request is the most common one, resources can be created
by RPC handlers listening on the AMQP bus, such as those which create DHCP
ports, or by plugin operations, such as those which create router ports.
To this aim, TrackedResource instances are initialised with a reference to
the model class for the resource for which they track usage data. During
object initialisation, SqlAlchemy event handlers are installed for this class.
The event handler is executed after a record is inserted or deleted.
As a result, usage data for that resource will be marked as 'dirty' once
the operation completes, so that the next time usage data is requested,
it will be synchronised by counting resource usage from the database.
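A minimal sketch of the kind of hook involved, using SQLAlchemy mapper events
(the handler below is illustrative, not the actual neutron implementation)::

    from sqlalchemy import event

    from neutron.db import models_v2


    def _mark_dirty(mapper, connection, target):
        # illustrative only: flag the usage tracker so the next usage
        # query recounts from the database
        pass

    # handlers comparable to those TrackedResource installs for its model
    event.listen(models_v2.Port, 'after_insert', _mark_dirty)
    event.listen(models_v2.Port, 'after_delete', _mark_dirty)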
Even if this solution has some drawbacks, listed in the 'exceptions and
caveats' section, it is more reliable than solutions such as:
* Updating the usage counters with the new 'correct' value every time an
operation completes.
* Having a periodic task synchronising quota usage data with actual data in
the Neutron DB.
Finally, regardless of whether CountableResource or TrackedResource is used,
the quota engine always invokes its count() method to retrieve resource usage.
Therefore, from the perspective of the Quota engine there is absolutely no
difference between CountableResource and TrackedResource.
Quota Enforcement
-----------------
**NOTE: The reservation engine is currently not wired into the API controller
as issues have been discovered with multiple workers. For more information
see bug1468134_**
.. _bug1468134: https://bugs.launchpad.net/neutron/+bug/1486134
Before dispatching a request to the plugin, the Neutron 'base' controller [#]_
attempts to make a reservation for requested resource(s).
Reservations are made by calling the make_reservation method in
neutron.quota.QuotaEngine.
The process of making a reservation is fairly straightforward:
* Get current resource usages. This is achieved by invoking the count method
on every requested resource, and then retrieving the amount of reserved
resources.
* Fetch current quota limits for requested resources, by invoking the
_get_tenant_quotas method.
* Fetch expired reservations for selected resources. This amount will be
  subtracted from resource usage. As in most cases there won't be any
  expired reservations, this approach actually requires fewer DB operations
  than summing non-expired, reserved resources for each request.
* For each resource calculate its headroom, and verify the requested
  amount of resources does not exceed the headroom.
* If the above holds for all resources, the reservation is saved in the DB,
  otherwise an OverQuotaLimit exception is raised.
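In pseudocode, the per-resource check amounts to something like the following
(values are illustrative)::

    used = 7        # resources counted in the database
    reserved = 2    # amount held by active reservations
    expired = 1     # expired reservations, subtracted from usage
    limit = 10      # tenant quota limit
    requested = 2

    headroom = limit - (used + reserved - expired)
    if requested > headroom:
        # this is where the engine raises OverQuotaLimit
        print("over quota")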
The quota engine is able to make a reservation for multiple resources.
However, it is worth noting that because of the current structure of the
Neutron API layer, there will not be any practical case in which a reservation
for multiple resources is made. For this reason performance optimisations
that avoid repeating queries for every resource are not part of the current
implementation.
In order to ensure correct operations, a row-level lock is acquired in
the transaction which creates the reservation. The lock is acquired when
reading usage data. In case of write-set certification failures,
which can occur in active/active clusters such as MySQL galera, the decorator
oslo_db.api.wrap_db_retry will retry the transaction if a DBDeadLock
exception is raised.
While non-locking approaches are possible, it has been found that, since
non-locking algorithms increase the chances of collision, the cost of
handling a DBDeadlock is still lower than the cost of retrying the operation
when a collision is detected. A study in this direction was conducted for
IP allocation operations, but the same principles apply here as well [#]_.
Nevertheless, moving away from DB-level locks is something that must happen
for quota enforcement in the future.
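A sketch of how such a retry decorator can be applied (the function below is
illustrative; only the decorator comes from oslo_db)::

    from oslo_db import api as oslo_db_api


    @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
    def make_reservation(context, tenant_id, deltas):
        # read usage data under a row-level lock, verify headroom, then
        # write the reservation; retried if a DBDeadlock is raised
        pass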
Committing and cancelling a reservation is as simple as deleting the
reservation itself. When a reservation is committed, the resources which
were committed are now stored in the database, so the reservation itself
should be deleted. The Neutron quota engine simply removes the record when
cancelling a reservation (i.e. the request failed to complete), and also
marks quota usage info as dirty when the reservation is committed (i.e.
the request completed correctly).
Reservations are committed or cancelled by respectively calling the
commit_reservation and cancel_reservation methods in neutron.quota.QuotaEngine.
Reservations are not perennial. Eternal reservations would eventually exhaust
tenants' quotas because they would never be removed when an API worker crashes
whilst in the middle of an operation.
Reservation expiration is currently set to 120 seconds, and is not yet
configurable. Expired reservations are not counted when
calculating resource usage. While creating a reservation, if any expired
reservation is found, all expired reservations for that tenant and resource
will be removed from the database, thus avoiding build-up of expired
reservations.
Setting up Resource Tracking for a Plugin
------------------------------------------
By default plugins do not leverage resource tracking. Having the plugin
explicitly declare which resources should be tracked is a precise design
choice aimed at limiting as much as possible the chance of introducing
errors in existing plugins.
For this reason a plugin must declare which resources it intends to track.
This can be achieved using the tracked_resources decorator available in the
neutron.quota.resource_registry module.
The decorator should ideally be applied to the plugin's __init__ method.
The decorator accepts a list of keyword arguments as input. The name of each
argument must be a resource name, and the value of the argument must be
a DB model class. For example:
::
    @resource_registry.tracked_resources(network=models_v2.Network,
                                         port=models_v2.Port,
                                         subnet=models_v2.Subnet,
                                         subnetpool=models_v2.SubnetPool)
This will ensure network, port, subnet and subnetpool resources are tracked.
In theory, it is possible to use this decorator multiple times, and not
exclusively to __init__ methods. However, this would eventually lead to
code readability and maintainability problems, so developers are strongly
encouraged to apply this decorator exclusively to the plugin's __init__
method (or any other method which is called by the plugin only once
during its initialization).
Notes for Implementors of RPC Interfaces and RESTful Controllers
-------------------------------------------------------------------------------
Neutron unfortunately does not have a layer which is called before dispatching
the operation to the plugin and which can be leveraged both from the RESTful
and the RPC over AMQP APIs. In particular the RPC handlers call straight into
the plugin, without doing any request authorisation or quota enforcement.
Therefore RPC handlers must explicitly indicate if they are going to call the
plugin to create or delete any sort of resources. This is achieved in a simple
way, by ensuring modified resources are marked as dirty after the RPC handler
execution terminates. To this aim developers can use the mark_resources_dirty
decorator available in the module neutron.quota.resource_registry.
The decorator scans the whole list of registered resources, and stores
the dirty status for the usage trackers of those resources for which items
have been created or destroyed during the plugin operation.
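For example, a hypothetical RPC handler creating ports might look like this
(a minimal sketch; the class, method and plugin attribute names are
illustrative assumptions)::

    from neutron.quota import resource_registry


    class FooRpcCallback(object):

        @resource_registry.mark_resources_dirty
        def create_dhcp_port(self, context, port_data):
            # the 'port' usage tracker is marked dirty once this handler
            # terminates, since a port is created by the plugin call
            return self.plugin.create_port(context, port_data)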
Exceptions and Caveats
-----------------------
Please be aware of the following limitations of the quota enforcement engine:
* Subnet allocation from subnet pools, in particular shared pools, is also
  subject to quota limit checks. However these checks are not enforced by the
  quota engine, but through a mechanism implemented in the
  neutron.ipam.subnetalloc module. This is because the Quota engine is not
  able to satisfy the requirements for quotas on subnet allocation.
* The quota engine also provides a limit_check routine which enforces quota
  checks without creating reservations. This way of doing quota enforcement
  is extremely unreliable and is superseded by the reservation mechanism. It
  has not been removed to ensure off-tree plugins and extensions which
  leverage it are not broken.
* SqlAlchemy events might not be the most reliable way for detecting changes
  in resource usage. Since the event mechanism monitors the data model class,
  it is paramount for correct quota enforcement that resources are always
  created and deleted using object relational mappings. For instance, deleting
  a resource with a query.delete call will not trigger the event. SQLAlchemy
  events should be considered a temporary measure adopted because Neutron
  lacks persistent API objects.
* As CountableResource instances do not track usage data, no write-intent
  lock is acquired when making a reservation. Therefore the quota engine
  with CountableResource is not concurrency-safe.
* The mechanism for specifying for which resources usage tracking should be
  enabled relies on the fact that the plugin is loaded before quota-limited
  resources are registered. For this reason it is not possible to validate
  whether a resource actually exists when enabling tracking for it. Developers
  should pay particular attention to ensuring resource names are correctly
  specified.
* The code assumes usage trackers are a trusted source of truth: if they
  report a usage counter and the dirty bit is not set, that counter is
  correct; if it is dirty, then surely that counter is out of sync.
  This is not very robust, as there might be issues upon restart when toggling
  the track_quota_usage configuration variable, as stale counters might be
  relied upon for making reservations. Also, the same situation might occur
  if a server crashes after the API operation is completed but before the
  reservation is committed, as the actual resource usage is changed but
  the corresponding usage tracker is not marked as dirty.
References
----------
.. [#] Subnet allocation extension: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/extensions/subnetallocation.py
.. [#] DB Quota driver class: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/db/quota_db.py#n33
.. [#] Quota API extension controller: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/extensions/quotasv2.py#n40
.. [#] Neutron resource attribute map: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/attributes.py#n639
.. [#] Base controller class: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/base.py#n50
.. [#] http://lists.openstack.org/pipermail/openstack-dev/2015-February/057534.html
View File
@ -0,0 +1,187 @@
=================================
Neutron Messaging Callback System
=================================
Neutron already has a callback system [link-to: callbacks.rst] for
in-process resource callbacks where publishers and subscribers are able
to publish and subscribe for resource events.
This system is different, and is intended to be used for inter-process
callbacks, via the messaging fanout mechanisms.
In Neutron, agents may need to subscribe to specific resource details which
may change over time, and the purpose of this messaging callback system
is to allow agent subscription to those resources without the need to extend
or modify existing RPC calls, or to create new RPC messages.
A few resources which can benefit from this system:
* QoS policies;
* Security Groups.
Using a remote publisher/subscriber pattern, the information about such
resources could be published using fanout messages to all interested nodes,
minimizing messaging requests from agents to the server, since agents stay
subscribed for their whole lifecycle (unless they unsubscribe).
Within an agent, there could be multiple subscriber callbacks for the same
resource events; the resource updates would be dispatched to the subscriber
callbacks from a single message. Any update comes in a single message,
requiring only a single oslo versioned objects deserialization on each
receiving agent.
This publishing/subscription mechanism is highly dependent on the format
of the resources passed around. This is why the library only allows
versioned objects to be published and subscribed. Oslo versioned objects
allow object version down/up conversion. [#vo_mkcompat]_ [#vo_mkcptests]_
For the VO's versioning schema look here: [#vo_versioning]_
Versioned objects serialization/deserialization with the
obj_to_primitive(target_version=..) and primitive_to_obj() [#ov_serdes]_
methods is used internally to convert/retrieve objects before/after messaging.
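As a sketch of that round trip (the policy object and target version are
illustrative; the receiving side is shown using the obj_from_primitive
classmethod)::

    # on the sending side: downgrade to the pinned version if needed
    primitive = policy.obj_to_primitive(target_version='1.0')

    # on the receiving agent: rebuild the object from the primitive
    policy = QoSPolicy.obj_from_primitive(primitive)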
Considering rolling upgrades, there are several scenarios to look at:

* publisher (generally neutron-server or a service) and subscriber (agent)
  know the same version of the objects, so they serialize and deserialize
  without issues.
* publisher knows (and sends) an older version of the object; the subscriber
  will get the object updated to the latest version on arrival, before any
  callback is called.
* publisher sends a newer version of the object; the subscriber won't be able
  to deserialize the object. In this case the upgrade strategy described
  below applies.

The strategy for upgrades will be:
During upgrades, we pin neutron-server to a compatible version for resource
fanout updates, and the server sends both the old and the newer version.
The new agents process updates, taking the newer version of the resource
fanout updates. When the whole system is upgraded, we un-pin the compatible
version fanout.
Serialized versioned objects look like::
    {'versioned_object.version': '1.0',
     'versioned_object.name': 'QoSPolicy',
     'versioned_object.data': {'rules': [
                                   {'versioned_object.version': '1.0',
                                    'versioned_object.name': 'QoSBandwidthLimitRule',
                                    'versioned_object.data': {'name': u'a'},
                                    'versioned_object.namespace': 'versionedobjects'}
                               ],
                               'uuid': u'abcde',
                               'name': u'aaa'},
     'versioned_object.namespace': 'versionedobjects'}
Topic names for every resource type RPC endpoint
================================================
neutron-vo-<resource_class_name>-<version>
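For example, fanout updates for the QoSPolicy object serialized above, at
version 1.0, would use the topic neutron-vo-QoSPolicy-1.0.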
In the future, we may want to get oslo messaging to support subscribing to
topics dynamically; then we may want to use:
neutron-vo-<resource_class_name>-<resource_id>-<version> instead,
or something equivalent which would allow fine granularity for the receivers
to only get the information that is interesting to them.
Subscribing to resources
========================
Imagine that you have agent A, which just got to handle a new port that
has an associated security group and QoS policy.
The agent code processing port updates may look like::
    from neutron.api.rpc.callbacks.consumer import registry
    from neutron.api.rpc.callbacks import events
    from neutron.api.rpc.callbacks import resources


    def process_resource_updates(resource_type, resource, event_type):
        # send to the right handler which will update any control plane
        # details related to the updated resource...
        pass


    def subscribe_resources():
        registry.subscribe(process_resource_updates, resources.SEC_GROUP)
        registry.subscribe(process_resource_updates, resources.QOS_POLICY)


    def port_update(port):
        # here we extract sg_id and qos_policy_id from port..
        sec_group = registry.pull(resources.SEC_GROUP, sg_id)
        qos_policy = registry.pull(resources.QOS_POLICY, qos_policy_id)
The relevant function is:

* subscribe(callback, resource_type): subscribes a callback to a resource type.

The callback function will receive the following arguments:

* resource_type: the type of resource which is receiving the update.
* resource: the updated resource, an instance of a supported object type.
* event_type: will be one of CREATED, UPDATED, or DELETED; see
  neutron.api.rpc.callbacks.events for details.
Given the current underlying oslo_messaging support for dynamic topics on the
receiver, we cannot implement a per "resource type + resource id" topic:
rabbitmq seems to handle tens of thousands of topics without suffering, but
creating hundreds of oslo_messaging receivers on different topics seems to
crash. We may want to look into that later, to avoid agents receiving resource
updates which are uninteresting to them.
Unsubscribing from resources
============================
To unsubscribe registered callbacks:

* unsubscribe(callback, resource_type): unsubscribe a callback from a specific
  resource type.
* unsubscribe_all(): unsubscribe from all resources.
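Continuing the agent example above, a minimal sketch::

    from neutron.api.rpc.callbacks.consumer import registry
    from neutron.api.rpc.callbacks import resources


    def unsubscribe_resources():
        # stop receiving security group updates only
        registry.unsubscribe(process_resource_updates, resources.SEC_GROUP)


    def unsubscribe_all_resources():
        # drop every registered callback, e.g. on agent shutdown
        registry.unsubscribe_all()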
Sending resource events
=======================
On the server side, resource updates could come from anywhere: a service
plugin, an extension, anything that updates, creates, or destroys the
resources and that is of any interest to subscribed agents.
The server/publisher side may look like::
    from neutron.api.rpc.callbacks.producer import registry
    from neutron.api.rpc.callbacks import events


    def create_qos_policy(...):
        policy = fetch_policy(...)
        update_the_db(...)
        registry.push(policy, events.CREATED)


    def update_qos_policy(...):
        policy = fetch_policy(...)
        update_the_db(...)
        registry.push(policy, events.UPDATED)


    def delete_qos_policy(...):
        policy = fetch_policy(...)
        update_the_db(...)
        registry.push(policy, events.DELETED)
References
==========
.. [#ov_serdes] https://github.com/openstack/oslo.versionedobjects/blob/master/oslo_versionedobjects/tests/test_objects.py#L621
.. [#vo_mkcompat] https://github.com/openstack/oslo.versionedobjects/blob/master/oslo_versionedobjects/base.py#L460
.. [#vo_mkcptests] https://github.com/openstack/oslo.versionedobjects/blob/master/oslo_versionedobjects/tests/test_objects.py#L111
.. [#vo_versioning] https://github.com/openstack/oslo.versionedobjects/blob/master/oslo_versionedobjects/base.py#L236
View File
@ -29,7 +29,7 @@ running on the compute nodes, and modifying the IPTables rules on each hyperviso
* `Plugin RPC classes <https://git.openstack.org/cgit/openstack/neutron/tree/neutron/db/securitygroups_rpc_base.py>`_
* `SecurityGroupServerRpcMixin <https://git.openstack.org/cgit/openstack/neutron/tree/neutron/db/securitygroups_rpc_base.py#39>`_ - defines the RPC API that the plugin uses to communicate with the agents running on the compute nodes
* `SecurityGroupServerRpcMixin <https://git.openstack.org/cgit/openstack/neutron/tree/neutron/db/securitygroups_rpc_base.py>`_ - defines the RPC API that the plugin uses to communicate with the agents running on the compute nodes
* SecurityGroupServerRpcMixin - Defines the API methods used to fetch data from the database, in order to return responses to agents via the RPC API
* `Agent RPC classes <https://git.openstack.org/cgit/openstack/neutron/tree/neutron/agent/securitygroups_rpc.py>`_
@ -43,8 +43,8 @@ IPTables Driver
* ``prepare_port_filter`` takes a ``port`` argument, which is a ``dictionary`` object that contains information about the port - including the ``security_group_rules``
* ``prepare_port_filter`` `appends the port to an internal dictionary <https://git.openstack.org/cgit/openstack/neutron/tree/neutron/agent/linux/iptables_firewall.py#L60>`_, ``filtered_ports`` which is used to track the internal state.
* ``prepare_port_filter`` appends the port to an internal dictionary, ``filtered_ports`` which is used to track the internal state.
* Each security group has a `chain <http://www.thegeekstuff.com/2011/01/iptables-fundamentals/>`_ in Iptables.
* The ``IptablesFirewallDriver`` has a method to `convert security group rules into iptables statements <https://git.openstack.org/cgit/openstack/neutron/tree/neutron/agent/linux/iptables_firewall.py#L248>`_
* The ``IptablesFirewallDriver`` has a method to convert security group rules into iptables statements.
View File
@ -0,0 +1,148 @@
..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Convention for heading levels in Neutron devref:
======= Heading 0 (reserved for the title in a document)
------- Heading 1
~~~~~~~ Heading 2
+++++++ Heading 3
''''''' Heading 4
(Avoid deeper levels because they do not render well.)
Sub-Project Guidelines
======================
This document provides guidance for those who maintain projects that consume
main neutron or neutron advanced services repositories as a dependency. It is
not meant to describe projects that are not tightly coupled with Neutron code.
Code Reuse
----------
At all times, avoid using any Neutron symbols that are explicitly marked as
private (those have an underscore at the start of their names).
Oslo Incubator
~~~~~~~~~~~~~~
Never reuse neutron code that comes from oslo-incubator in your
subprojects. For the neutron repository, this code is usually located under
the following path: neutron.openstack.common.*

If you need any oslo-incubator code in your repository, copy it into your
repository from oslo-incubator and then use it from there.

The Neutron team does not maintain any backwards compatibility strategy for
this code subtree and can break anyone who relies on it at any time.
Requirements
------------
Neutron dependency
~~~~~~~~~~~~~~~~~~
Subprojects usually depend on neutron repositories, using the -e git://...
scheme to define such a dependency. The dependency *must not* be present in
requirements lists though, and instead belongs in the tox.ini deps section.
This is because future pbr library releases do not guarantee -e git://...
dependencies will work.

You may still put some versioned neutron dependency in your requirements list
to indicate the dependency for anyone who packages your subproject.
Explicit dependencies
~~~~~~~~~~~~~~~~~~~~~
Each neutron project maintains its own lists of requirements. Subprojects that
depend on neutron while directly using some of those libraries that neutron
maintains as its dependencies must not rely on the fact that neutron will pull
the needed dependencies for them. Direct library usage requires that the
library be mentioned in the requirements lists of the subproject.
The reason to duplicate those dependencies is that the neutron team does not
stick to any backwards compatibility strategy in regard to requirements lists,
and is free to drop any of those dependencies at any time, breaking anyone who
could rely on those libraries to be pulled by neutron itself.
Automated requirements updates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
At all times, subprojects that use neutron as a dependency should make sure
their dependencies do not conflict with neutron's ones.
Core neutron projects maintain their requirements lists by utilizing a
so-called proposal bot. To keep your subproject in sync with neutron, it is
highly recommended that you register your project in
openstack/requirements:projects.txt file to enable the bot to update
requirements for you.
Once a subproject opts into global requirements synchronization, it should enable
check-requirements jobs in project-config. For example, see `this patch
<https://review.openstack.org/#/c/215671/>`_.
Stable branches
---------------
Stable branches for libraries should be created at the same time as the
corresponding neutron stable branches. This avoids situations
where a postponed cut-off results in a stable branch that contains patches
that belong to the next release. That would require reverting patches, which
is something you should avoid.

Make sure your neutron dependency uses the corresponding stable branch for
neutron, not master.
Note that to keep requirements in sync with core neutron repositories in stable
branches, you should make sure that your project is registered in
openstack/requirements:projects.txt *for the branch in question*.
Subproject stable branches are supervised by horizontal `neutron-stable-maint
team <https://review.openstack.org/#/admin/groups/539,members>`_.
More info on stable branch process can be found on `the following page
<https://wiki.openstack.org/wiki/StableBranch>`_.
Releases
--------
It is suggested that sub-projects release new tarballs on PyPI from time to
time, especially for stable branches. It will make the life of packagers and
other consumers of your code easier.
It is highly suggested that you do not strip pieces of the source tree (tests,
executables, tools) before releasing on PyPI: those missing pieces may be
needed to validate the package, or make the packaging easier or more complete.
As a rule of thumb, don't strip anything from the source tree unless
absolutely necessary.
Sub-Project Release Process
~~~~~~~~~~~~~~~~~~~~~~~~~~~
To release a sub-project, perform the following steps:
* Only members of the `neutron-release
<https://review.openstack.org/#/admin/groups/150,members>`_ gerrit group can
do releases. Make sure you talk to a member of neutron-release to perform
your release.
* For projects which have not moved to post-versioning, we need to push an
alpha tag to avoid pbr complaining. The neutron-release group will handle
this.
* Modify setup.cfg to remove the version (if you have one), which moves your
project to post-versioning, similar to all the other Neutron projects. You
can skip this step if you don't have a version in setup.cfg.
* Have neutron-release push the tag to gerrit.
* Have neutron-release `tag the release
<http://docs.openstack.org/infra/manual/drivers.html#tagging-a-release>`_,
which will release the code to PyPI.
View File
@ -67,6 +67,9 @@ working on testing.
By being included, the project accepts oversight by the TC as a part of
being in OpenStack, and also accepts oversight by the Neutron PTL.
It is also assumed the respective review teams will make sure their projects
stay in line with `current best practices <sub_project_guidelines.html>`_.
Inclusion Criteria
------------------
@ -100,6 +103,10 @@ repo but are summarized here to describe the functionality they provide.
+-------------------------------+-----------------------+
| group-based-policy_ | intent |
+-------------------------------+-----------------------+
| kuryr_ | docker |
+-------------------------------+-----------------------+
| networking-ale-omniswitch_ | ml2 |
+-------------------------------+-----------------------+
| networking-arista_ | ml2,l3 |
+-------------------------------+-----------------------+
| networking-bagpipe-l2_ | ml2 |
@ -134,6 +141,8 @@ repo but are summarized here to describe the functionality they provide.
+-------------------------------+-----------------------+
| networking-ofagent_ | ml2 |
+-------------------------------+-----------------------+
| networking-onos_ | ml2 |
+-------------------------------+-----------------------+
| networking-ovn_ | ml2 |
+-------------------------------+-----------------------+
| networking-ovs-dpdk_ | ml2 |
@ -164,13 +173,23 @@ Functionality legend
- vpn: a VPN service plugin;
- lb: a Load Balancer service plugin;
- intent: a service plugin that provides a declarative API to realize networking;
- docker: a Docker network plugin that uses Neutron to provide networking services to Docker containers;
.. _networking-ale-omniswitch:
ALE Omniswitch
++++++++++++++
* Git: https://git.openstack.org/cgit/openstack/networking-ale-omniswitch
* Launchpad: https://launchpad.net/networking-ale-omniswitch
* Pypi: https://pypi.python.org/pypi/networking-ale-omniswitch
.. _networking-arista:
Arista
++++++
* Git: https://git.openstack.org/cgit/stackforge/networking-arista
* Git: https://git.openstack.org/cgit/openstack/networking-arista
* Launchpad: https://launchpad.net/networking-arista
* Pypi: https://pypi.python.org/pypi/networking-arista
@ -212,7 +231,7 @@ Brocade
Cisco
+++++
* Git: https://git.openstack.org/cgit/stackforge/networking-cisco
* Git: https://git.openstack.org/cgit/openstack/networking-cisco
* Launchpad: https://launchpad.net/networking-cisco
* PyPI: https://pypi.python.org/pypi/networking-cisco
@ -268,6 +287,15 @@ IBM SDNVE
* Git: https://git.openstack.org/cgit/stackforge/networking-ibm
* Launchpad: https://launchpad.net/networking-ibm
.. _kuryr:
Kuryr
+++++
* Git: https://git.openstack.org/cgit/openstack/kuryr/
* Launchpad: https://launchpad.net/kuryr
* PyPI: https://pypi.python.org/pypi/kuryr/
.. _networking-l2gw:
L2 Gateway
@ -326,6 +354,15 @@ OpenFlow Agent (ofagent)
* Launchpad: https://launchpad.net/networking-ofagent
* PyPI: https://pypi.python.org/pypi/networking-ofagent
.. _networking-onos:
Open Network Operating System (onos)
++++++++++++++++++++++++++++++++++++
* Git: https://git.openstack.org/cgit/openstack/networking-onos
* Launchpad: https://launchpad.net/networking-onos
* PyPI: https://pypi.python.org/pypi/networking-onos
.. _networking-ovn:
Open Virtual Network
@ -348,7 +385,7 @@ Open DPDK
PLUMgrid
++++++++
* Git: https://git.openstack.org/cgit/stackforge/networking-plumgrid
* Git: https://git.openstack.org/cgit/openstack/networking-plumgrid
* Launchpad: https://launchpad.net/networking-plumgrid
* PyPI: https://pypi.python.org/pypi/networking-plumgrid
View File
@ -0,0 +1,157 @@
..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Convention for heading levels in Neutron devref:
======= Heading 0 (reserved for the title in a document)
------- Heading 1
~~~~~~~ Heading 2
+++++++ Heading 3
''''''' Heading 4
(Avoid deeper levels because they do not render well.)
Template for ModelMigrationSync for external repos
==================================================
This section contains a template for a test which checks that the Python models
for database tables are synchronized with the alembic migrations that create
the database schema. This test should be implemented in all driver/plugin
repositories that were split out from Neutron.
What does the test do?
----------------------
This test compares models with the result of existing migrations. It is based on
`ModelsMigrationsSync
<http://docs.openstack.org/developer/oslo.db/api/sqlalchemy/test_migrations.html#oslo_db.sqlalchemy.test_migrations.ModelsMigrationsSync>`_
which is provided by oslo.db and was adapted for Neutron. It compares core
Neutron models and vendor specific models with migrations from Neutron core and
migrations from the driver/plugin repo. This test is functional - it runs against
MySQL and PostgreSQL dialects. The detailed description of this test can be
found in Neutron Database Layer section - `Tests to verify that database
migrations and models are in sync
<http://docs.openstack.org/developer/neutron/devref/db_layer.html#module-neutron.tests.functional.db.test_migrations>`_.
Steps for implementing the test
-------------------------------
1. Import all models in one place
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Create a module ``networking_foo/db/models/head.py`` with the following
content: ::
    from neutron.db.migration.models import head

    from networking_foo import models  # noqa
    # Alternatively, import separate modules here if the models are not in one
    # models.py file


    def get_metadata():
        return head.model_base.BASEV2.metadata
2. Implement the test module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The test uses external.py from Neutron. This file contains lists of table
names, which were moved out of Neutron: ::
    VPNAAS_TABLES = [...]
    LBAAS_TABLES = [...]
    FWAAS_TABLES = [...]

    # Arista ML2 driver Models moved to openstack/networking-arista
    REPO_ARISTA_TABLES = [...]

    # Models moved to openstack/networking-cisco
    REPO_CISCO_TABLES = [...]

    ...

    TABLES = (FWAAS_TABLES + LBAAS_TABLES + VPNAAS_TABLES + ...
              + REPO_ARISTA_TABLES + REPO_CISCO_TABLES)
Also, the test uses **VERSION_TABLE**, the name of the database table which
contains the revision id of the head migration. It is preferred to keep this
variable in ``networking_foo/db/migration/alembic_migrations/__init__.py`` so
it is easy to use in the test.
Create a module ``networking_foo/tests/functional/db/test_migrations.py``
with the following content: ::
    from oslo_config import cfg

    from neutron.db.migration.alembic_migrations import external
    from neutron.db.migration import cli as migration
    from neutron.tests.common import base
    from neutron.tests.functional.db import test_migrations

    from networking_foo.db.migration import alembic_migrations
    from networking_foo.db.models import head

    # EXTERNAL_TABLES should contain all names of tables that are not related
    # to the current repo.
    EXTERNAL_TABLES = set(external.TABLES) - set(external.REPO_FOO_TABLES)


    class _TestModelsMigrationsFoo(test_migrations._TestModelsMigrations):

        def db_sync(self, engine):
            cfg.CONF.set_override('connection', engine.url, group='database')
            for conf in migration.get_alembic_configs():
                self.alembic_config = conf
                self.alembic_config.neutron_config = cfg.CONF
                migration.do_alembic_command(conf, 'upgrade', 'heads')

        def get_metadata(self):
            return head.get_metadata()

        def include_object(self, object_, name, type_, reflected, compare_to):
            if type_ == 'table' and (name == 'alembic' or
                                     name == alembic_migrations.VERSION_TABLE or
                                     name in EXTERNAL_TABLES):
                return False
            else:
                return True


    class TestModelsMigrationsMysql(_TestModelsMigrationsFoo,
                                    base.MySQLTestCase):
        pass


    class TestModelsMigrationsPsql(_TestModelsMigrationsFoo,
                                   base.PostgreSQLTestCase):
        pass
3. Add functional requirements
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A separate file ``networking_foo/tests/functional/requirements.txt`` should be
created containing the following requirements that are needed for successful
test execution.
::
    psutil>=1.1.1,<2.0.0
    psycopg2
    PyMySQL>=0.6.2  # MIT License
Example implementation `in VPNaaS <https://review.openstack.org/209943>`_
View File
@ -0,0 +1,114 @@
..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Convention for heading levels in Neutron devref:
======= Heading 0 (reserved for the title in a document)
------- Heading 1
~~~~~~~ Heading 2
+++++++ Heading 3
''''''' Heading 4
(Avoid deeper levels because they do not render well.)
Test Coverage
=============
The intention is to track merged features or areas of code that lack certain
types of tests. This document may be used both by developers who want to
contribute tests and by operators who are considering adopting a feature.
Coverage
--------
Note that while both API and scenario tests target a deployed OpenStack cloud,
API tests are under the Neutron tree and scenario tests are under the Tempest
tree.
It is the expectation that API changes involve API tests, agent features
or modifications involve functional tests, and Neutron-wide features involve
fullstack or scenario tests as appropriate.
The table references tests that explicitly target a feature, and not a job
that is configured to run against a specific backend (thereby testing it
implicitly). So, for example, while the Linux bridge agent has a job that runs
the API and scenario tests with the Linux bridge agent configured, it does not
have functional tests that target the agent explicitly. The 'gate' column
is about running API/scenario tests with Neutron configured in a certain way,
such as what L2 agent to use or what type of routers to create.
* V - Merged
* Blank - Not applicable
* X - Absent or lacking
* Patch number - Currently in review
* A name - That person has committed to work on an item
+------------------------+------------+------------+------------+------------+------------+------------+
| Area | Unit | Functional | API | Fullstack | Scenario | Gate |
+========================+============+============+============+============+============+============+
| DVR | Partial* | L3-V OVS-X | V | amuller | X | V |
+------------------------+------------+------------+------------+------------+------------+------------+
| L3 HA | V | V | X | 196393 | X | X |
+------------------------+------------+------------+------------+------------+------------+------------+
| L2pop | V | X | | X | | |
+------------------------+------------+------------+------------+------------+------------+------------+
| DHCP HA | V | | | amuller | | |
+------------------------+------------+------------+------------+------------+------------+------------+
| OVS ARP responder | V | X* | | X* | | |
+------------------------+------------+------------+------------+------------+------------+------------+
| OVS agent | V | Partial | | V | | V |
+------------------------+------------+------------+------------+------------+------------+------------+
| Linux Bridge agent | V | X | | X | | Non-voting |
+------------------------+------------+------------+------------+------------+------------+------------+
| Metering | V | X | V | X | | |
+------------------------+------------+------------+------------+------------+------------+------------+
| DHCP agent | V | 136834 | | amuller | | V |
+------------------------+------------+------------+------------+------------+------------+------------+
| rpc_workers | | | | | | X |
+------------------------+------------+------------+------------+------------+------------+------------+
| Reference ipam driver | V | | | | | X (?) |
+------------------------+------------+------------+------------+------------+------------+------------+
| MTU advertisement | V | | | X | | |
+------------------------+------------+------------+------------+------------+------------+------------+
| VLAN transparency | V | | X | X | | |
+------------------------+------------+------------+------------+------------+------------+------------+
| Prefix delegation | V | X | | X | | |
+------------------------+------------+------------+------------+------------+------------+------------+
* DVR DB unit tests often assert that internal methods were called instead of
testing functionality. A lot of our unit tests are flawed in this way,
and DVR unit tests especially so. An attempt to remedy this was made
in patch 178880.
* OVS ARP responder cannot be tested at the gate because the gate uses Ubuntu
  14.04, which only packages OVS 2.0. OVS added ARP manipulation support in
  version 2.1.
* Prefix delegation doesn't have functional tests for the dibbler and pd
layers, nor for the L3 agent changes.
Missing Infrastructure
----------------------
The following section details missing test *types*. If you want to pick up
an action item, please contact amuller for more context and guidance.
* The Neutron team would like Rally to persist results over a window of time,
graph and visualize this data, so that reviewers could compare average runs
against a proposed patch.
* It's possible to test RPC methods via the unit tests infrastructure. This
  was proposed in patch 162811. The goal is to provide developers with a
  lightweight way to rapidly run tests that target the RPC layer, so that a
  patch that modifies an RPC method's signature could be verified quickly and
  locally.
* Neutron currently does not test an in-place upgrade (upgrading the server
  first, followed by agents one machine at a time). We make sure that the RPC
  layer remains backwards compatible manually via the review process but have
  no CI that verifies this.
View File
@ -60,13 +60,14 @@ OPTIONS
FILES
========
plugins.ini file contains the plugin information
neutron.conf file contains configuration information in the form of python-gflags.
* plugins.ini file contains the plugin information.
* neutron.conf file contains neutron-server's configuration information.
SEE ALSO
========
* `OpenStack Neutron <http://neutron.openstack.org>`__
* `OpenStack Neutron Documents <http://docs.openstack.org/developer/neutron>`__
* `OpenStack Neutron Wiki Page <https://wiki.openstack.org/wiki/Neutron>`__
BUGS
====
View File
@ -62,9 +62,10 @@ gate and bug triage for their area of focus is under control.
The following are the current Neutron Lieutenants.
+------------------------+---------------------------+----------------------+
| Area | Lieutenant | IRC nic |
| Area | Lieutenant | IRC nick |
+========================+===========================+======================+
| API and DB | Akihiro Motoki | amotoki |
| +---------------------------+----------------------+
| | Henry Gessau | HenryG |
+------------------------+---------------------------+----------------------+
| Built-In Control Plane | Kevin Benton | kevinbenton |
@ -73,6 +74,10 @@ The following are the current Neutron Lieutenants.
+------------------------+---------------------------+----------------------+
| Docs | Edgar Magana | emagana |
+------------------------+---------------------------+----------------------+
| Infra | Armando Migliaccio | armax |
| +---------------------------+----------------------+
| | Doug Wiegley | dougwig |
+------------------------+---------------------------+----------------------+
| L3 | Carl Baldwin | carl_baldwin |
+------------------------+---------------------------+----------------------+
| Services | Doug Wiegley | dougwig |
@ -89,6 +94,7 @@ Some notes on the above:
* Services includes FWaaS, LBaaS, and VPNaaS.
* Note these areas may change as the project evolves due to code refactoring,
new feature areas, and libification of certain pieces of code.
* Infra means interactions with infra from a neutron perspective.
Neutron also consists of several plugins, drivers, and agents that are developed
effectively as sub-projects within Neutron in their own git repositories.
@ -100,19 +106,29 @@ updating the core review team for the sub-project's repositories.
| Area | Lieutenant | IRC nick |
+========================+===========================+======================+
| dragonflow | Eran Gampel | gampel |
| +---------------------------+----------------------+
| | Gal Sagie | gsagie |
+------------------------+---------------------------+----------------------+
| kuryr | Antoni Segura Puimedon | apuimedo |
| +---------------------------+----------------------+
| | Gal Sagie | gsagie |
+------------------------+---------------------------+----------------------+
| networking-l2gw | Sukhdev Kapur | sukhdev |
+------------------------+---------------------------+----------------------+
| networking-midonet | Ryu Ishimoto | ryu_ishimoto |
| +---------------------------+----------------------+
| | Jaume Devesa | devvesa |
| +---------------------------+----------------------+
| | YAMAMOTO Takashi | yamamoto |
+------------------------+---------------------------+----------------------+
| networking-odl | Flavio Fernandes | flaviof |
| +---------------------------+----------------------+
| | Kyle Mestery | mestery |
+------------------------+---------------------------+----------------------+
| networking-ofagent | YAMAMOTO Takashi | yamamoto |
+------------------------+---------------------------+----------------------+
| networking-onos | Vikram Choudhary | vikram |
+------------------------+---------------------------+----------------------+
| networking-ovn | Russell Bryant | russellb |
+------------------------+---------------------------+----------------------+
| networking-plumgrid | Fawad Khaliq | fawadkhaliq |
View File
@ -36,11 +36,19 @@
# use_namespaces = True will be enforced.
# use_namespaces = True
# In some cases the neutron router is not present to provide the metadata
# IP but the DHCP server can be used to provide this info. Setting this
# value will force the DHCP server to append specific host routes to the
# DHCP request. If this option is set, then the metadata service will be
# activated for all the networks.
# force_metadata = False
# The DHCP server can assist with providing metadata support on isolated
# networks. Setting this value to True will cause the DHCP server to append
# specific host routes to the DHCP request. The metadata service will only
# be activated when the subnet does not contain any router port. The guest
# instance must be configured to request host routes via DHCP (Option 121).
# This option doesn't have any effect when force_metadata is set to True.
# enable_isolated_metadata = False
# Allows for serving metadata requests coming from a dedicated metadata
@ -58,7 +66,8 @@
# Location to store DHCP server config files
# dhcp_confs = $state_path/dhcp
# Domain to use for building the hostnames
# Domain to use for building the hostnames. This option will be deprecated in
# a future release. It is being replaced by dns_domain in neutron.conf
# dhcp_domain = openstacklocal
# Override the default dnsmasq settings with this file
View File
@ -50,6 +50,11 @@
# and not through this parameter.
# ipv6_gateway =
# (StrOpt) Driver used for ipv6 prefix delegation. This needs to be
# an entry point defined in the neutron.agent.linux.pd_drivers namespace. See
# setup.cfg for entry points included with the neutron source.
# prefix_delegation_driver = dibbler
# Indicates that this L3 agent should also handle routers that do not have
# an external network gateway configured. This option should be True only
# for a single agent in a Neutron deployment, and may be False for all agents
View File
@ -75,7 +75,7 @@
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering,qos
# Paste configuration file
# api_paste_config = api-paste.ini
@ -114,6 +114,9 @@
# tell dnsmasq to use infinite lease times.
# dhcp_lease_duration = 86400
# Domain to use for building the hostnames
# dns_domain = openstacklocal
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
@ -178,6 +181,11 @@
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
# agent_down_time = 75
# Agent starts with admin_state_up=False when enable_new_agents=False.
# In that case, the user's resources will not be scheduled automatically to the
# agent until the admin changes admin_state_up to True.
# enable_new_agents = True
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
@ -256,6 +264,17 @@
#
# Enable snat by default on external gateway when available
# enable_snat_by_default = True
#
# The network type to use when creating the HA network for an HA router.
# By default or if empty, the first entry in 'tenant_network_types'
# is used. This is helpful when the VRRP traffic should use a specific
# network which is not the default one.
# ha_network_type =
# Example: ha_network_type = flat
#
# The physical network name with which the HA network can be created.
# ha_network_physical_name =
# Example: ha_network_physical_name = physnet1
# =========== end of items for l3 extension =======
# =========== items for metadata proxy configuration ==============
@ -1017,3 +1036,7 @@ lock_path = $state_path/lock
# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
# Deprecated group/name - [DEFAULT]/fake_rabbit
# fake_rabbit = false
[qos]
# Drivers list to use to send the update notification
# notification_drivers = message_queue
View File
@ -1,50 +0,0 @@
[sdnve]
# (ListOpt) The IP address of one (or more) SDN-VE controllers
# Default value is: controller_ips = 127.0.0.1
# Example: controller_ips = 127.0.0.1,127.0.0.2
# (StrOpt) The integration bridge for OF based implementation
# The default value for integration_bridge is None
# Example: integration_bridge = br-int
# (ListOpt) The interface mapping connecting the integration
# bridge to external network as a list of physical network names and
# interfaces: <physical_network_name>:<interface_name>
# Example: interface_mappings = default:eth2
# (BoolOpt) Used to reset the integration bridge, if exists
# The default value for reset_bridge is True
# Example: reset_bridge = False
# (BoolOpt) Used to set the OVS controller as out-of-band
# The default value for out_of_band is True
# Example: out_of_band = False
#
# (BoolOpt) The fake controller for testing purposes
# Default value is: use_fake_controller = False
# (StrOpt) The port number for use with controller
# The default value for the port is 8443
# Example: port = 8443
# (StrOpt) The userid for use with controller
# The default value for the userid is admin
# Example: userid = sdnve_user
# (StrOpt) The password for use with controller
# The default value for the password is admin
# Example: password = sdnve_password
#
# (StrOpt) The default type of tenants (and associated resources)
# Available choices are: OVERLAY or OF
# The default value for tenant type is OVERLAY
# Example: default_tenant_type = OVERLAY
# (StrOpt) The string in tenant description that indicates
# Default value for OF tenants: of_signature = SDNVE-OF
# (StrOpt) The string in tenant description that indicates
# Default value for OVERLAY tenants: overlay_signature = SDNVE-OVERLAY
[sdnve_agent]
# (IntOpt) Agent's polling interval in seconds
# polling_interval = 2
# (StrOpt) What to use for root helper
# The default value: root_helper = 'sudo'
# (BoolOpt) Whether to use rpc or not
# The default value: rpc = True
[securitygroup]
# The security group is not supported:
# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
View File
@ -2,15 +2,16 @@
# (ListOpt) List of network type driver entrypoints to be loaded from
# the neutron.ml2.type_drivers namespace.
#
# type_drivers = local,flat,vlan,gre,vxlan
# Example: type_drivers = flat,vlan,gre,vxlan
# type_drivers = local,flat,vlan,gre,vxlan,geneve
# Example: type_drivers = flat,vlan,gre,vxlan,geneve
# (ListOpt) Ordered list of network_types to allocate as tenant
# networks. The default value 'local' is useful for single-box testing
# but provides no connectivity between hosts.
#
# tenant_network_types = local
# Example: tenant_network_types = vlan,gre,vxlan
# Example: tenant_network_types = vlan,gre,vxlan,geneve
# (ListOpt) Ordered list of networking mechanism driver entrypoints
# to be loaded from the neutron.ml2.mechanism_drivers namespace.
@ -93,6 +94,22 @@
# vxlan_group =
# Example: vxlan_group = 239.1.1.1
[ml2_type_geneve]
# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
# ranges of Geneve VNI IDs that are available for tenant network allocation.
#
# vni_ranges =
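# Example: vni_ranges = 400:500,2000:2999 (illustrative VNI ranges)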
# (IntOpt) Geneve encapsulation header size is dynamic; this
# value is used to calculate the maximum MTU for the driver.
# This is the sum of the sizes of the outer ETH + IP + UDP + GENEVE
# headers.
# The default size for this field is 50, which is the size of the
# Geneve header without any additional option headers
#
# max_header_size =
# Example: max_header_size = 50 (Geneve headers with no additional options)
[securitygroup]
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
View File
@ -1,157 +0,0 @@
[ml2_cisco]
# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# VLAN interface. For example, if an interface is being created for
# VLAN 2001 it will be named 'q-2001' using the default prefix.
# The total length allowed for the prefix name and VLAN is 32 characters,
# the prefix will be truncated if the total length is greater than 32.
#
# vlan_name_prefix = q-
# Example: vlan_name_prefix = vnet-
# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
# svi_round_robin = False
#
# (StrOpt) The name of the physical_network managed via the Cisco Nexus Switch.
# This string value must be present in the ml2_conf.ini network_vlan_ranges
# variable.
#
# managed_physical_network =
# Example: managed_physical_network = physnet1
# Cisco Nexus Switch configurations.
# Each switch to be managed by Openstack Neutron must be configured here.
#
# Cisco Nexus Switch Format.
# [ml2_mech_cisco_nexus:<IP address of switch>]
# <hostname>=<intf_type:port> (1)
# ssh_port=<ssh port> (2)
# username=<credential username> (3)
# password=<credential password> (4)
# nve_src_intf=<loopback number> (5)
# physnet=<physical network> (6)
#
# (1) For each host connected to a port on the switch, specify the hostname
# and the Nexus physical port (interface) it is connected to.
# Valid intf_type's are 'ethernet' and 'port-channel'.
# The default setting for <intf_type:> is 'ethernet' and need not be
# added to this setting.
# (2) The TCP port for connecting via SSH to manage the switch. This is
# port number 22 unless the switch has been configured otherwise.
# (3) The username for logging into the switch to manage it.
# (4) The password for logging into the switch to manage it.
# (5) Only valid if VXLAN overlay is configured and vxlan_global_config is
# set to True.
# The NVE source interface is a loopback interface that is configured on
# the switch with a valid /32 IP address. This /32 IP address must be known
# by the transient devices in the transport network and the remote VTEPs.
# This is accomplished by advertising it through a dynamic routing protocol
# in the transport network. (NB: If no nve_src_intf is defined then a
# default setting of 0 (creates "loopback0") will be used.)
# (6) Only valid if VXLAN overlay is configured.
# The physical network name defined in the network_vlan_ranges variable
# (defined under the ml2_type_vlan section) that this switch is controlling.
# The configured 'physnet' is the physical network domain that is connected
# to this switch. The VLAN ranges defined in network_vlan_ranges for a
# physical network are allocated dynamically and are unique per physical
# network. These dynamic vlans may be reused across physical networks.
#
# Example:
# [ml2_mech_cisco_nexus:1.1.1.1]
# compute1=1/1
# compute2=ethernet:1/2
# compute3=port-channel:1
# ssh_port=22
# username=admin
# password=mySecretPassword
# nve_src_intf=1
# physnet=physnet1
# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# provider VLAN interface. For example, if an interface is being created
# for provider VLAN 3003 it will be named 'p-3003' using the default prefix.
# The total length allowed for the prefix name and VLAN is 32 characters;
# the prefix will be truncated if the total length is greater than 32.
#
# provider_vlan_name_prefix = p-
# Example: provider_vlan_name_prefix = PV-
# (BoolOpt) A flag indicating whether OpenStack networking should manage the
# creation and removal of VLANs for provider networks on the Nexus
# switches. If the flag is set to False then OpenStack will not create or
# remove VLANs for provider networks, and the administrator needs to
# manage these interfaces manually or by external orchestration.
#
# provider_vlan_auto_create = True
# (BoolOpt) A flag indicating whether OpenStack networking should manage
# the adding and removing of provider VLANs from trunk ports on the Nexus
# switches. If the flag is set to False then OpenStack will not add or
# remove provider VLANs from trunk ports, and the administrator needs to
# manage these operations manually or by external orchestration.
#
# provider_vlan_auto_trunk = True
# (BoolOpt) A flag indicating whether OpenStack networking should manage the
# creating and removing of the Nexus switch VXLAN global settings of 'feature
# nv overlay', 'feature vn-segment-vlan-based', 'interface nve 1' and the NVE
# subcommand 'source-interface loopback #'. If the flag is set to False
# (default) then OpenStack will not add or remove these VXLAN settings, and
# the administrator needs to manage these operations manually or by external
# orchestration.
#
# vxlan_global_config = True
# (BoolOpt) Make the Nexus device configuration persistent by running the
# Nexus CLI 'copy run start' after successfully applying configurations.
# (default) This flag defaults to False to keep consistent with
# existing functionality.
#
# persistent_switch_config = False
# (IntOpt) Time interval to check the state of the Nexus device.
# (default) This value defaults to 0 seconds which disables this
# functionality. When enabled, 30 seconds is suggested.
#
# switch_heartbeat_time = 0
# (IntOpt) Number of times to attempt config replay with switch.
# This variable depends on switch_heartbeat_time being enabled.
# (default) This value defaults to 3
#
# switch_replay_count = 3
[ml2_type_nexus_vxlan]
# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
# ranges of VXLAN Network IDs that are available for tenant network allocation.
#
# vni_ranges =
# Example: 100:1000,2000:6000
#
# (ListOpt) Multicast groups for the VXLAN interface. When configured, this
# enables sending all broadcast traffic to these multicast groups. Comma-separated
# list of min:max ranges of multicast IPs.
# NOTE: each must be a valid multicast IP; invalid IPs will be discarded.
#
# mcast_ranges =
# Example: mcast_ranges = 224.0.0.1:224.0.0.3,224.0.1.1:224.0.1.
[ml2_cisco_ucsm]
# Cisco UCS Manager IP address
# ucsm_ip=1.1.1.1
# Username to connect to UCS Manager
# ucsm_username=user
# Password to connect to UCS Manager
# ucsm_password=password
# SR-IOV and VM-FEX vendors supported by this plugin
# xxxx:yyyy represents vendor_id:product_id
# supported_pci_devs = ['2222:3333', '4444:5555']
# Hostname to Service profile mapping for UCS Manager
# controlled compute hosts
# ucsm_host_list=Hostname1:Serviceprofile1, Hostname2:Serviceprofile2

View File

@ -57,6 +57,11 @@
# 'ovs-ofctl' is currently the only available choice.
# of_interface = ovs-ofctl
# (StrOpt) ovs datapath to use.
# 'system' is the default value and corresponds to the kernel datapath.
# To enable the userspace datapath set this value to 'netdev'
# datapath_type = system
[agent]
# Log agent heartbeats from this OVS agent
# log_agent_heartbeats = False
@ -133,6 +138,11 @@
#
# quitting_rpc_timeout = 10
# (ListOpt) Extensions list to use
# Example: extensions = qos
#
# extensions =
[securitygroup]
# Firewall driver for realizing neutron security group function.
# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
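Putting the two new options together, an illustrative OVS agent configuration
that switches to the userspace datapath and loads the QoS extension (both
option names come from the hunks above) might read:

[ovs]
datapath_type = netdev

[agent]
extensions = qos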

View File

@ -1,63 +0,0 @@
# Sample Configurations
[ovs]
# Do not change this parameter unless you have a good reason to.
# This is the name of the OVS integration bridge. There is one per hypervisor.
# The integration bridge acts as a virtual "patch port". All VM VIFs are
# attached to this bridge and then "patched" according to their network
# connectivity.
# integration_bridge = br-int
[agent]
# Agent's polling interval in seconds
# polling_interval = 2
[securitygroup]
# Firewall driver for realizing neutron security group function
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True
[ofc]
# Specify OpenFlow Controller Host, Port and Driver to connect.
# host = 127.0.0.1
# port = 8888
# Base URL of OpenFlow Controller REST API.
# It is prepended to a path of each API request.
# path_prefix =
# Drivers are in neutron/plugins/nec/drivers/ .
# driver = trema
# PacketFilter is available when it's enabled in this configuration
# and supported by the driver.
# enable_packet_filter = true
# Support PacketFilter on OFC router interface
# support_packet_filter_on_ofc_router = true
# Use SSL to connect
# use_ssl = false
# Key file
# key_file =
# Certificate file
# cert_file =
# Disable SSL certificate verification
# insecure_ssl = false
# Maximum attempts per OFC API request. NEC plugin retries
# API request to OFC when OFC returns ServiceUnavailable (503).
# The value must be greater than 0.
# api_max_attempts = 3
[provider]
# Default router provider to use.
# default_router_provider = l3-agent
# List of enabled router providers.
# router_providers = l3-agent,openflow

View File

@ -1,14 +0,0 @@
# Config file for Neutron PLUMgrid Plugin
[plumgriddirector]
# These settings should point to the PLUMgrid Director
# for the PLUMgrid platform.
# director_server=<director-ip-address>
# director_server_port=<director-port>
# Authentication parameters for the Director.
# These are the admin credentials to manage and control
# the PLUMgrid Director server.
# username=<director-admin-username>
# password=<director-admin-password>
# servertimeout=5
# driver=<plugin-driver>

View File

@ -1,283 +0,0 @@
[DEFAULT]
# User name for NSX controller
# nsx_user = admin
# Password for NSX controller
# nsx_password = admin
# Time before aborting a request on an unresponsive controller (Seconds)
# http_timeout = 75
# Maximum number of times a particular request should be retried
# retries = 2
# Maximum number of times a redirect response should be followed
# redirects = 2
# Comma-separated list of NSX controller endpoints (<ip>:<port>). When port
# is omitted, 443 is assumed. This option MUST be specified, e.g.:
# nsx_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh.ee:80
# UUID of the pre-existing default NSX Transport zone to be used for creating
# tunneled isolated "Neutron" networks. This option MUST be specified, e.g.:
# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53
# (Optional) UUID for the default l3 gateway service to use with this cluster.
# To be specified if planning to use logical routers with external gateways.
# default_l3_gw_service_uuid =
# (Optional) UUID for the default l2 gateway service to use with this cluster.
# To be specified to provide tenants with a predefined gateway for connecting their networks.
# default_l2_gw_service_uuid =
# (Optional) UUID for the default service cluster. A service cluster is introduced to
# represent a group of gateways and it is needed in order to use Logical Services like
# dhcp and metadata in the logical space. NOTE: If agent_mode is set to 'agentless' this
# config parameter *MUST BE* set to a valid pre-existent service cluster uuid.
# default_service_cluster_uuid =
# Default interface name to be used on network gateways. This value
# will be used for any device associated with a network gateway for which an
# interface name was not specified
# nsx_default_interface_name = breth0
# Reconnect connection to nsx if not used within this amount of time.
# conn_idle_timeout = 900
[quotas]
# number of network gateways allowed per tenant, -1 means unlimited
# quota_network_gateway = 5
[nsxv]
# URL for NSXv manager
# manager_uri = https://management_ip
# User name for NSXv manager
# user = admin
# Password for NSXv manager
# password = default
# (Required) Datacenter ID for Edge deployment
# datacenter_moid =
# (Required) Cluster IDs for clusters containing OpenStack hosts
# cluster_moid =
# (Optional) Deployment Container ID for NSX Edge deployment
# If not specified, either a default global container will be used, or
# the resource pool and datastore specified below will be used
# deployment_container_id =
# (Optional) Resource pool ID for NSX Edge deployment
# resource_pool_id =
# (Optional) Datastore ID for NSX Edge deployment
# datastore_id =
# (Required) UUID of logic switch for physical network connectivity
# external_network =
# (Optional) Asynchronous task status check interval
# default is 2000 (millisecond)
# task_status_check_interval = 2000
# (Optional) Network scope ID for VXLAN virtual wires
# vdn_scope_id =
# (Optional) DVS ID for VLANS
# dvs_id =
# (ListOpt) Define backup edge pool's management range with the four-tuple:
# <edge_type>:[edge_size]:<minimum_pooled_edges>:<maximum_pooled_edges>.
# edge_type:'service'(service edge) or 'vdr'(distributed edge).
# edge_size: 'compact', 'large'(by default), 'xlarge' or 'quadlarge'.
#
# By default, the edge pool manager manages service edges
# of compact and large sizes and distributed edges of large size, as follows:
# backup_edge_pool = service:large:4:10,service:compact:4:10,vdr:large:4:10
# (Optional) Maximum number of sub-interfaces supported per vnic in an edge
# default is 20
# maximum_tunnels_per_vnic = 20
# Maximum number of API retries
# retries = 10
# (Optional) Network ID for management network connectivity
# mgt_net_moid =
# (Optional) Management network IP address for metadata proxy
# mgt_net_proxy_ips =
# (Optional) Management network netmask for metadata proxy
# mgt_net_proxy_netmask =
# (Optional) Management network default gateway for metadata proxy
# mgt_net_default_gateway =
# (Optional) IP addresses used by Nova metadata service
# nova_metadata_ips =
# (Optional) TCP Port used by Nova metadata server
# nova_metadata_port = 8775
# (Optional) Shared secret to sign metadata requests
# metadata_shared_secret =
# (Optional) Indicates if Nsxv spoofguard component is used to implement
# port-security feature.
# spoofguard_enabled = True
# (ListOpt) Ordered list of router_types to allocate as tenant routers.
# It limits the router types that the Nsxv can support for tenants:
# distributed: router is supported by distributed edge at the backend.
# shared: multiple routers share the same service edge at the backend.
# exclusive: router exclusively occupies one service edge at the backend.
# Nsxv would select the first available router type from tenant_router_types
# list if router-type is not specified.
# If the tenant defines the router type with "--distributed",
# "--router_type exclusive" or "--router_type shared", Nsxv would verify that
# the router type is in tenant_router_types.
# The admin supports all three of these router types.
#
# tenant_router_types = shared, distributed, exclusive
# Example: tenant_router_types = distributed, shared
# (Optional) Enable an administrator to configure the edge user and password
# Username to configure for Edge appliance login
# edge_appliance_user =
# (Optional) Password to configure for Edge appliance login
# edge_appliance_password =
# (Optional) URL for distributed locking coordination resource for lock manager
# This value is passed as a parameter to tooz coordinator.
# By default, value is None and oslo_concurrency is used for single-node
# lock management.
# locking_coordinator_url =
# (Optional) DHCP lease time
# dhcp_lease_time = 86400
[nsx]
# Maximum number of ports for each bridged logical switch
# The recommended value for this parameter varies with NSX version
# Please use:
# NSX 2.x -> 64
# NSX 3.0, 3.1 -> 5000
# NSX 3.2 -> 10000
# max_lp_per_bridged_ls = 5000
# Maximum number of ports for each overlay (stt, gre) logical switch
# max_lp_per_overlay_ls = 256
# Number of connections to each controller node.
# default is 10
# concurrent_connections = 10
# Number of seconds a generation id should be valid for (default -1 meaning do not time out)
# nsx_gen_timeout = -1
# Acceptable values for 'metadata_mode' are:
# - 'access_network': this enables a dedicated connection to the metadata
# proxy for metadata server access via Neutron router.
# - 'dhcp_host_route': this enables host route injection via the dhcp agent.
# This option is only useful if running on a host that does not support
# namespaces otherwise access_network should be used.
# metadata_mode = access_network
# The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt)
# default_transport_type = stt
# Specifies in which mode the plugin needs to operate in order to provide DHCP and
# metadata proxy services to tenant instances. If 'agent' is chosen (default)
# the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to
# provide such services. In this mode, the plugin supports API extensions 'agent'
# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse),
# the plugin will use NSX logical services for DHCP and metadata proxy. This
# simplifies the deployment model for Neutron, in that the plugin no longer requires
# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode
# becomes ineffective. The 'agentless' mode is supported from NSX 4.2 or above.
# Furthermore, a 'combined' mode is also provided and is used to support existing
# deployments that want to adopt the agentless mode going forward. With this mode,
# existing networks keep being served by the existing infrastructure (thus preserving
# backward compatibility), whereas new networks will be served by the new infrastructure.
# Migration tools are provided to 'move' one network from one model to another; with
# agent_mode set to 'combined', option 'network_auto_schedule' in neutron.conf is
# ignored, as new networks will no longer be scheduled to existing dhcp agents.
# agent_mode = agent
# Specifies which mode packet replication should be done in. If set to service
# a service node is required in order to perform packet replication. This can
# also be set to source if one wants replication to be performed locally (NOTE:
# usually only useful for testing if one does not want to deploy a service node).
# In order to leverage distributed routers, replication_mode should be set to
# "service".
# replication_mode = service
[nsx_sync]
# Interval in seconds between runs of the status synchronization task.
# The plugin will aim at resynchronizing operational status for all
# resources in this interval, and it should therefore be large enough
# to ensure the task is feasible. Otherwise the plugin will be
# constantly synchronizing resource status, i.e. a new task is started
# as soon as the previous one completes.
# If this value is set to 0, the state synchronization thread for this
# Neutron instance will be disabled.
# state_sync_interval = 10
# Random additional delay between two runs of the state synchronization task.
# An additional wait time between 0 and max_random_sync_delay seconds
# will be added on top of state_sync_interval.
# max_random_sync_delay = 0
# Minimum delay, in seconds, between two status synchronization requests for NSX.
# Depending on chunk size, controller load, and other factors, state
# synchronization requests might be pretty heavy. This means the
# controller might take time to respond, and its load might increase
# considerably. This parameter allows specifying a minimum
# interval between two subsequent requests.
# The value for this parameter must never exceed state_sync_interval.
# If it does, an error will be raised at startup.
# min_sync_req_delay = 1
# Minimum number of resources to be retrieved from NSX in a single status
# synchronization request.
# The actual size of the chunk will increase if the number of resources is such
# that using the minimum chunk size will cause the interval between two
# requests to be less than min_sync_req_delay
# min_chunk_size = 500
# Enable this option to allow punctual state synchronization on show
# operations. In this way, show operations will always fetch the operational
# status of the resource from the NSX backend, and this might have
# a considerable impact on overall performance.
# always_read_status = False
[nsx_lsn]
# Pull LSN information from NSX in case it is missing from the local
# data store. This is useful to rebuild the local store in case of
# server recovery
# sync_on_missing_data = False
[nsx_dhcp]
# (Optional) Comma-separated list of additional DNS servers. The default is an empty list.
# extra_domain_name_servers =
# Domain to use for building the hostnames
# domain_name = openstacklocal
# Default DHCP lease time
# default_lease_time = 43200
[nsx_metadata]
# IP address used by Metadata server
# metadata_server_address = 127.0.0.1
# TCP Port used by Metadata server
# metadata_server_port = 8775
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it MUST match with the configuration used by the Metadata server
# metadata_shared_secret =

View File

@ -1,10 +0,0 @@
{
"create_network_gateway": "rule:admin_or_owner",
"update_network_gateway": "rule:admin_or_owner",
"delete_network_gateway": "rule:admin_or_owner",
"connect_network": "rule:admin_or_owner",
"disconnect_network": "rule:admin_or_owner",
"create_gateway_device": "rule:admin_or_owner",
"update_gateway_device": "rule:admin_or_owner",
"delete_gateway_device": "rule_admin_or_owner"
}

View File

@ -1,7 +0,0 @@
{
"create_router:external_gateway_info:enable_snat": "rule:admin_or_owner",
"create_router:distributed": "rule:admin_or_owner",
"get_router:distributed": "rule:admin_or_owner",
"update_router:external_gateway_info:enable_snat": "rule:admin_or_owner",
"update_router:distributed": "rule:admin_or_owner"
}

View File

@ -0,0 +1,16 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# Filters for the dibbler-based reference implementation of the pluggable
# Prefix Delegation driver. Other implementations using an alternative agent
# should include a similar filter in this folder.
# prefix_delegation_agent
dibbler-client: CommandFilter, dibbler-client, root

View File

@ -1,12 +0,0 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# nec_neutron_agent
ovs-vsctl: CommandFilter, ovs-vsctl, root

View File

@ -1,8 +1,10 @@
{
"context_is_admin": "role:admin",
"admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
"owner": "tenant_id:%(tenant_id)s",
"admin_or_owner": "rule:context_is_admin or rule:owner",
"context_is_advsvc": "role:advsvc",
"admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
"admin_owner_or_network_owner": "rule:admin_or_network_owner or rule:owner",
"admin_only": "rule:context_is_admin",
"regular_user": "",
"shared": "field:networks:shared=True",
@ -62,7 +64,7 @@
"create_port:binding:profile": "rule:admin_only",
"create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:allowed_address_pairs": "rule:admin_or_network_owner",
"get_port": "rule:admin_or_owner or rule:context_is_advsvc",
"get_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc",
"get_port:queue_id": "rule:admin_only",
"get_port:binding:vif_type": "rule:admin_only",
"get_port:binding:vif_details": "rule:admin_only",
@ -76,7 +78,7 @@
"update_port:binding:profile": "rule:admin_only",
"update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:allowed_address_pairs": "rule:admin_or_network_owner",
"delete_port": "rule:admin_or_owner or rule:context_is_advsvc",
"delete_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc",
"get_router:ha": "rule:admin_only",
"create_router": "rule:regular_user",
@ -174,5 +176,23 @@
"update_service_profile": "rule:admin_only",
"delete_service_profile": "rule:admin_only",
"get_service_profiles": "rule:admin_only",
"get_service_profile": "rule:admin_only"
"get_service_profile": "rule:admin_only",
"get_policy": "rule:regular_user",
"create_policy": "rule:admin_only",
"update_policy": "rule:admin_only",
"delete_policy": "rule:admin_only",
"get_policy_bandwidth_limit_rule": "rule:regular_user",
"create_policy_bandwidth_limit_rule": "rule:admin_only",
"delete_policy_bandwidth_limit_rule": "rule:admin_only",
"update_policy_bandwidth_limit_rule": "rule:admin_only",
"get_rule_type": "rule:regular_user",
"restrict_wildcard": "(not field:rbac_policy:target_tenant=*) or rule:admin_only",
"create_rbac_policy": "",
"create_rbac_policy:target_tenant": "rule:restrict_wildcard",
"update_rbac_policy": "rule:admin_or_owner",
"update_rbac_policy:target_tenant": "rule:restrict_wildcard and rule:admin_or_owner",
"get_rbac_policy": "rule:admin_or_owner",
"delete_rbac_policy": "rule:admin_or_owner"
}

View File

@ -10,7 +10,7 @@ filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap
# explicitly specify a full path (separated by ',')
# If not specified, defaults to system PATH environment variable.
# These directories MUST all be only writeable by root !
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin
# Enable logging to syslog
# Default value is False

View File

@ -30,6 +30,8 @@ from neutron.agent.ovsdb import api as ovsdb
from neutron.common import exceptions
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants
# Default timeout for ovs-vsctl command
DEFAULT_OVS_VSCTL_TIMEOUT = 10
@ -102,8 +104,11 @@ class BaseOVS(object):
self.vsctl_timeout = cfg.CONF.ovs_vsctl_timeout
self.ovsdb = ovsdb.API.get(self)
def add_bridge(self, bridge_name):
self.ovsdb.add_br(bridge_name).execute()
def add_bridge(self, bridge_name,
datapath_type=constants.OVS_DATAPATH_SYSTEM):
self.ovsdb.add_br(bridge_name,
datapath_type).execute()
br = OVSBridge(bridge_name)
# Don't return until vswitchd sets up the internal port
br.get_port_ofport(bridge_name)
@ -143,9 +148,10 @@ class BaseOVS(object):
class OVSBridge(BaseOVS):
def __init__(self, br_name):
def __init__(self, br_name, datapath_type=constants.OVS_DATAPATH_SYSTEM):
super(OVSBridge, self).__init__()
self.br_name = br_name
self.datapath_type = datapath_type
def set_controller(self, controllers):
self.ovsdb.set_controller(self.br_name,
@ -171,8 +177,14 @@ class OVSBridge(BaseOVS):
self.set_db_attribute('Bridge', self.br_name, 'protocols', protocols,
check_error=True)
def create(self):
self.ovsdb.add_br(self.br_name).execute()
def create(self, secure_mode=False):
with self.ovsdb.transaction() as txn:
txn.add(
self.ovsdb.add_br(self.br_name,
datapath_type=self.datapath_type))
if secure_mode:
txn.add(self.ovsdb.set_fail_mode(self.br_name,
FAILMODE_SECURE))
# Don't return until vswitchd sets up the internal port
self.get_port_ofport(self.br_name)
@ -182,7 +194,8 @@ class OVSBridge(BaseOVS):
def reset_bridge(self, secure_mode=False):
with self.ovsdb.transaction() as txn:
txn.add(self.ovsdb.del_br(self.br_name))
txn.add(self.ovsdb.add_br(self.br_name))
txn.add(self.ovsdb.add_br(self.br_name,
datapath_type=self.datapath_type))
if secure_mode:
txn.add(self.ovsdb.set_fail_mode(self.br_name,
FAILMODE_SECURE))
@ -268,6 +281,10 @@ class OVSBridge(BaseOVS):
if 'NXST' not in item)
return retval
def dump_all_flows(self):
return [f for f in self.run_ofctl("dump-flows", []).splitlines()
if 'NXST' not in f]
def deferred(self, **kwargs):
return DeferredOVSBridge(self, **kwargs)
@ -489,6 +506,36 @@ class OVSBridge(BaseOVS):
txn.add(self.ovsdb.db_set('Controller',
controller_uuid, *attr))
def _set_egress_bw_limit_for_port(self, port_name, max_kbps,
max_burst_kbps):
with self.ovsdb.transaction(check_error=True) as txn:
txn.add(self.ovsdb.db_set('Interface', port_name,
('ingress_policing_rate', max_kbps)))
txn.add(self.ovsdb.db_set('Interface', port_name,
('ingress_policing_burst',
max_burst_kbps)))
def create_egress_bw_limit_for_port(self, port_name, max_kbps,
max_burst_kbps):
self._set_egress_bw_limit_for_port(
port_name, max_kbps, max_burst_kbps)
def get_egress_bw_limit_for_port(self, port_name):
max_kbps = self.db_get_val('Interface', port_name,
'ingress_policing_rate')
max_burst_kbps = self.db_get_val('Interface', port_name,
'ingress_policing_burst')
max_kbps = max_kbps or None
max_burst_kbps = max_burst_kbps or None
return max_kbps, max_burst_kbps
def delete_egress_bw_limit_for_port(self, port_name):
self._set_egress_bw_limit_for_port(
port_name, 0, 0)
def __enter__(self):
self.create()
return self
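The new egress bandwidth-limit helpers simply set the ingress_policing_rate
and ingress_policing_burst columns on the OVS Interface row (OVS polices what
it receives from the VM, which is the port's egress). A usage sketch, assuming
the usual import path for this module and a hypothetical port name:

from neutron.agent.common import ovs_lib  # assumed import path

# Limit a hypothetical port to 1000 kbps with an 800 kb burst,
# read the limit back, then clear it again (0/0 means no limit).
br = ovs_lib.OVSBridge('br-int')
br.create_egress_bw_limit_for_port('tap-example', 1000, 800)
print(br.get_egress_bw_limit_for_port('tap-example'))  # (1000, 800)
br.delete_egress_bw_limit_for_port('tap-example')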

View File

@ -24,6 +24,8 @@ DHCP_AGENT_OPTS = [
help=_("The driver used to manage the DHCP server.")),
cfg.BoolOpt('enable_isolated_metadata', default=False,
help=_("Support Metadata requests on isolated networks.")),
cfg.BoolOpt('force_metadata', default=False,
help=_("Force to use DHCP to get Metadata on all networks.")),
cfg.BoolOpt('enable_metadata_network', default=False,
help=_("Allows for serving metadata requests from a "
"dedicated network. Requires "
@ -38,7 +40,11 @@ DHCP_OPTS = [
help=_('Location to store DHCP server config files')),
cfg.StrOpt('dhcp_domain',
default='openstacklocal',
help=_('Domain to use for building the hostnames')),
help=_('Domain to use for building the hostnames. '
'This option is deprecated. It has been moved to '
'neutron.conf as dns_domain. It will be removed '
'from here in a future release.'),
deprecated_for_removal=True),
]
DNSMASQ_OPTS = [

View File

@ -0,0 +1,59 @@
# Copyright (c) 2015 Mellanox Technologies, Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class AgentCoreResourceExtension(object):
"""Define stable abstract interface for agent extensions.
An agent extension extends the agent core functionality.
"""
def initialize(self, connection, driver_type):
"""Perform agent core resource extension initialization.
:param connection: RPC connection that can be reused by the extension
to define its RPC endpoints
:param driver_type: a string that defines the agent type to the
extension. Can be used to choose the right backend
implementation.
Called after all extensions have been loaded.
No port handling will be called before this method.
"""
@abc.abstractmethod
def handle_port(self, context, data):
"""Handle agent extension for port.
This can be called on either create or update, depending on the
code flow. Thus, it's this function's responsibility to check what
actually changed.
:param context - rpc context
:param data - port data
"""
@abc.abstractmethod
def delete_port(self, context, data):
"""Delete port from agent extension.
:param context - rpc context
:param data - port data
"""

View File

@ -0,0 +1,85 @@
# Copyright (c) 2015 Mellanox Technologies, Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
import stevedore
from neutron.i18n import _LE, _LI
LOG = log.getLogger(__name__)
L2_AGENT_EXT_MANAGER_NAMESPACE = 'neutron.agent.l2.extensions'
L2_AGENT_EXT_MANAGER_OPTS = [
cfg.ListOpt('extensions',
default=[],
help=_('Extensions list to use')),
]
def register_opts(conf):
conf.register_opts(L2_AGENT_EXT_MANAGER_OPTS, 'agent')
class AgentExtensionsManager(stevedore.named.NamedExtensionManager):
"""Manage agent extensions."""
def __init__(self, conf):
super(AgentExtensionsManager, self).__init__(
L2_AGENT_EXT_MANAGER_NAMESPACE, conf.agent.extensions,
invoke_on_load=True, name_order=True)
LOG.info(_LI("Loaded agent extensions: %s"), self.names())
def initialize(self, connection, driver_type):
"""Initialize enabled L2 agent extensions.
:param connection: RPC connection that can be reused by extensions to
define their RPC endpoints
:param driver_type: a string that defines the agent type to the
extension. Can be used by the extension to choose
the right backend implementation.
"""
# Initialize each agent extension in the list.
for extension in self:
LOG.info(_LI("Initializing agent extension '%s'"), extension.name)
extension.obj.initialize(connection, driver_type)
def handle_port(self, context, data):
"""Notify all agent extensions to handle port."""
for extension in self:
try:
extension.obj.handle_port(context, data)
# TODO(QoS) add agent extensions exception and catch them here
except AttributeError:
LOG.exception(
_LE("Agent Extension '%(name)s' failed "
"while handling port update"),
{'name': extension.name}
)
def delete_port(self, context, data):
"""Notify all agent extensions to delete port."""
for extension in self:
try:
extension.obj.delete_port(context, data)
# TODO(QoS) add agent extensions exception and catch them here
# instead of AttributeError
except AttributeError:
LOG.exception(
_LE("Agent Extension '%(name)s' failed "
"while handling port deletion"),
{'name': extension.name}
)
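A sketch of how an agent could wire the manager in; the module path is an
assumption (the diff does not show file names), the 'ovs' driver type is
illustrative, and a ready RPC connection object is taken as given:

from oslo_config import cfg

from neutron.agent.l2 import extensions_manager  # assumed module path


def load_agent_extensions(connection, driver_type='ovs'):
    # Register the [agent] extensions option, then load and initialize
    # whatever is configured (e.g. extensions = qos).
    extensions_manager.register_opts(cfg.CONF)
    manager = extensions_manager.AgentExtensionsManager(cfg.CONF)
    manager.initialize(connection, driver_type)
    return manager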

View File

@ -0,0 +1,149 @@
# Copyright (c) 2015 Mellanox Technologies, Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
from oslo_concurrency import lockutils
import six
from neutron.agent.l2 import agent_extension
from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron import manager
@six.add_metaclass(abc.ABCMeta)
class QosAgentDriver(object):
"""Defines stable abstract interface for QoS Agent Driver.
The QoS agent driver defines the interface to be implemented by the
agent for applying QoS rules on a port.
"""
@abc.abstractmethod
def initialize(self):
"""Perform QoS agent driver initialization.
"""
@abc.abstractmethod
def create(self, port, qos_policy):
"""Apply QoS rules on port for the first time.
:param port: port object.
:param qos_policy: the QoS policy to be applied on port.
"""
#TODO(QoS) we may want to provide default implementations of calling
#delete and then update
@abc.abstractmethod
def update(self, port, qos_policy):
"""Apply QoS rules on port.
:param port: port object.
:param qos_policy: the QoS policy to be applied on port.
"""
@abc.abstractmethod
def delete(self, port, qos_policy):
"""Remove QoS rules from port.
:param port: port object.
:param qos_policy: the QoS policy to be removed from port.
"""
class QosAgentExtension(agent_extension.AgentCoreResourceExtension):
SUPPORTED_RESOURCES = [resources.QOS_POLICY]
def initialize(self, connection, driver_type):
"""Perform Agent Extension initialization.
"""
self.resource_rpc = resources_rpc.ResourcesPullRpcApi()
self.qos_driver = manager.NeutronManager.load_class_for_provider(
'neutron.qos.agent_drivers', driver_type)()
self.qos_driver.initialize()
# we cannot use a dict of sets here because port dicts are not hashable
self.qos_policy_ports = collections.defaultdict(dict)
self.known_ports = set()
registry.subscribe(self._handle_notification, resources.QOS_POLICY)
self._register_rpc_consumers(connection)
def _register_rpc_consumers(self, connection):
endpoints = [resources_rpc.ResourcesPushRpcCallback()]
for resource_type in self.SUPPORTED_RESOURCES:
# we assume that neutron-server always broadcasts the latest
# version known to the agent
topic = resources_rpc.resource_type_versioned_topic(resource_type)
connection.create_consumer(topic, endpoints, fanout=True)
@lockutils.synchronized('qos-port')
def _handle_notification(self, qos_policy, event_type):
# The server does not allow removing a policy that is attached to any
# port, so we ignore DELETED events. Also, if we receive a CREATED
# event for a policy, it means that no ports are attached to it yet.
# That's why we are interested in UPDATED events only.
if event_type == events.UPDATED:
self._process_update_policy(qos_policy)
@lockutils.synchronized('qos-port')
def handle_port(self, context, port):
"""Handle agent QoS extension for port.
This method applies a new policy to a port using the QoS driver.
Update events are handled in _handle_notification.
"""
port_id = port['port_id']
qos_policy_id = port.get('qos_policy_id')
if qos_policy_id is None:
self._process_reset_port(port)
return
# NOTE(moshele): if we have already seen this port and it has
# the same policy, we do nothing.
if (port_id in self.known_ports and
port_id in self.qos_policy_ports[qos_policy_id]):
return
self.qos_policy_ports[qos_policy_id][port_id] = port
self.known_ports.add(port_id)
qos_policy = self.resource_rpc.pull(
context, resources.QOS_POLICY, qos_policy_id)
self.qos_driver.create(port, qos_policy)
def delete_port(self, context, port):
self._process_reset_port(port)
def _process_update_policy(self, qos_policy):
for port_id, port in self.qos_policy_ports[qos_policy.id].items():
# TODO(QoS): for now, just reflush the rules on the port. Later, we
# may want to apply the difference between the rules lists only.
self.qos_driver.delete(port, None)
self.qos_driver.update(port, qos_policy)
def _process_reset_port(self, port):
port_id = port['port_id']
if port_id in self.known_ports:
self.known_ports.remove(port_id)
for qos_policy_id, port_dict in self.qos_policy_ports.items():
if port_id in port_dict:
del port_dict[port_id]
self.qos_driver.delete(port, None)
return
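To make the driver contract concrete, here is a minimal hypothetical driver
that only logs what it would do; a real driver would be registered under the
neutron.qos.agent_drivers entry point that initialize() loads from, and
port['port_id'] / qos_policy.id follow the usage in the extension above:

from oslo_log import log

LOG = log.getLogger(__name__)


class NoopQosAgentDriver(QosAgentDriver):
    """Hypothetical driver that only logs the requested operations."""

    def initialize(self):
        LOG.info("Noop QoS driver ready")

    def create(self, port, qos_policy):
        LOG.info("Would apply policy %s to port %s",
                 qos_policy.id, port['port_id'])

    def update(self, port, qos_policy):
        # Simplest possible update: clear, then re-apply.
        self.delete(port, None)
        self.create(port, qos_policy)

    def delete(self, port, qos_policy):
        LOG.info("Would clear QoS rules from port %s", port['port_id'])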

View File

@ -36,6 +36,7 @@ from neutron.agent.l3 import router_info as rinf
from neutron.agent.l3 import router_processing_queue as queue
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import pd
from neutron.agent.metadata import driver as metadata_driver
from neutron.agent import rpc as agent_rpc
from neutron.callbacks import events
@ -78,6 +79,7 @@ class L3PluginApi(object):
1.4 - Added L3 HA update_router_state. This method was reworked
into update_ha_routers_states
1.5 - Added update_ha_routers_states
1.6 - Added process_prefix_update
"""
@ -131,6 +133,12 @@ class L3PluginApi(object):
return cctxt.call(context, 'update_ha_routers_states',
host=self.host, states=states)
def process_prefix_update(self, context, prefix_update):
"""Process prefix update whenever prefixes get changed."""
cctxt = self.client.prepare(version='1.6')
return cctxt.call(context, 'process_prefix_update',
subnets=prefix_update)
class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
ha.AgentMixin,
@ -218,6 +226,12 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
self.target_ex_net_id = None
self.use_ipv6 = ipv6_utils.is_enabled()
self.pd = pd.PrefixDelegation(self.context, self.process_monitor,
self.driver,
self.plugin_rpc.process_prefix_update,
self.create_pd_router_update,
self.conf)
def _check_config_params(self):
"""Check items in configuration files.
@ -440,6 +454,9 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
for rp, update in self._queue.each_update_to_next_router():
LOG.debug("Starting router update for %s, action %s, priority %s",
update.id, update.action, update.priority)
if update.action == queue.PD_UPDATE:
self.pd.process_prefix_update()
continue
router = update.router
if update.action != queue.DELETE_ROUTER and not router:
try:
@ -574,6 +591,14 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
# When L3 agent is ready, we immediately do a full sync
self.periodic_sync_routers_task(self.context)
def create_pd_router_update(self):
router_id = None
update = queue.RouterUpdate(router_id,
queue.PRIORITY_PD_UPDATE,
timestamp=timeutils.utcnow(),
action=queue.PD_UPDATE)
self._queue.add(update)
class L3NATAgentWithStateReport(L3NATAgent):
@ -646,6 +671,8 @@ class L3NATAgentWithStateReport(L3NATAgent):
# When L3 agent is ready, we immediately do a full sync
self.periodic_sync_routers_task(self.context)
self.pd.after_start()
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
self.fullsync = True

View File

@ -74,6 +74,13 @@ OPTS = [
"next-hop using a global unique address (GUA) is "
"desired, it needs to be done via a subnet allocated "
"to the network and not through this parameter. ")),
cfg.StrOpt('prefix_delegation_driver',
default='dibbler',
help=_('Driver used for ipv6 prefix delegation. This needs to '
'be an entry point defined in the '
'neutron.agent.linux.pd_drivers namespace. See '
'setup.cfg for entry points included with the neutron '
'source.')),
cfg.BoolOpt('enable_metadata_proxy', default=True,
help=_("Allow running metadata proxy.")),
cfg.BoolOpt('router_delete_namespaces', default=True,

View File

@ -40,17 +40,27 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
if not self._is_this_snat_host():
# no centralized SNAT gateway for this node/agent
LOG.debug("not hosting snat for router: %s", self.router['id'])
if self.snat_namespace:
LOG.debug("SNAT was rescheduled to host %s. Clearing snat "
"namespace.", self.router.get('gw_port_host'))
return self.external_gateway_removed(
ex_gw_port, interface_name)
return
self._external_gateway_added(ex_gw_port,
interface_name,
self.snat_namespace.name,
preserve_ips=[])
if not self.snat_namespace:
# SNAT might be rescheduled to this agent; need to process like
# newly created gateway
return self.external_gateway_added(ex_gw_port, interface_name)
else:
self._external_gateway_added(ex_gw_port,
interface_name,
self.snat_namespace.name,
preserve_ips=[])
def external_gateway_removed(self, ex_gw_port, interface_name):
super(DvrEdgeRouter, self).external_gateway_removed(ex_gw_port,
interface_name)
if not self._is_this_snat_host():
if not self._is_this_snat_host() and not self.snat_namespace:
# no centralized SNAT gateway for this node/agent
LOG.debug("not hosting snat for router: %s", self.router['id'])
return
@ -75,7 +85,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
return
ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id'])
interface_name = self.get_snat_int_device_name(sn_port['id'])
interface_name = self._get_snat_int_device_name(sn_port['id'])
self._internal_network_added(
ns_name,
sn_port['network_id'],
@ -100,7 +110,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
if not is_this_snat_host:
return
snat_interface = self.get_snat_int_device_name(sn_port['id'])
snat_interface = self._get_snat_int_device_name(sn_port['id'])
ns_name = self.snat_namespace.name
prefix = dvr_snat_ns.SNAT_INT_DEV_PREFIX
if ip_lib.device_exists(snat_interface, namespace=ns_name):
@ -109,11 +119,11 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
def _create_dvr_gateway(self, ex_gw_port, gw_interface_name):
"""Create SNAT namespace."""
snat_ns = self.create_snat_namespace()
snat_ns = self._create_snat_namespace()
# connect snat_ports to br_int from SNAT namespace
for port in self.get_snat_interfaces():
# create interface_name
interface_name = self.get_snat_int_device_name(port['id'])
interface_name = self._get_snat_int_device_name(port['id'])
self._internal_network_added(
snat_ns.name, port['network_id'],
port['id'], port['fixed_ips'],
@ -127,7 +137,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
# kicks the FW Agent to add rules for the snat namespace
self.agent.process_router_add(self)
def create_snat_namespace(self):
def _create_snat_namespace(self):
# TODO(mlavalle): in the near future, this method should contain the
# code in the L3 agent that creates a gateway for a dvr. The first step
# is to move the creation of the snat namespace here
@ -138,7 +148,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
self.snat_namespace.create()
return self.snat_namespace
def get_snat_int_device_name(self, port_id):
def _get_snat_int_device_name(self, port_id):
long_name = dvr_snat_ns.SNAT_INT_DEV_PREFIX + port_id
return long_name[:self.driver.DEV_NAME_LEN]
@ -166,3 +176,8 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
self._add_snat_rules(ex_gw_port, self.snat_iptables_manager,
interface_name)
def update_routing_table(self, operation, route, namespace=None):
ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id'])
super(DvrEdgeRouter, self).update_routing_table(operation, route,
namespace=ns_name)

View File

@ -14,13 +14,13 @@
import os
from oslo_log import log as logging
from neutron.agent.l3 import fip_rule_priority_allocator as frpa
from neutron.agent.l3 import link_local_allocator as lla
from neutron.agent.l3 import namespaces
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.common import utils as common_utils
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
@ -49,7 +49,10 @@ class FipNamespace(namespaces.Namespace):
self.use_ipv6 = use_ipv6
self.agent_gateway_port = None
self._subscribers = set()
self._rule_priorities = set(range(FIP_PR_START, FIP_PR_END))
path = os.path.join(agent_conf.state_path, 'fip-priorities')
self._rule_priorities = frpa.FipRulePriorityAllocator(path,
FIP_PR_START,
FIP_PR_END)
self._iptables_manager = iptables_manager.IptablesManager(
namespace=self.get_name(),
use_ipv6=self.use_ipv6)
@ -85,14 +88,15 @@ class FipNamespace(namespaces.Namespace):
self._subscribers.discard(router_id)
return not self.has_subscribers()
def allocate_rule_priority(self):
return self._rule_priorities.pop()
def allocate_rule_priority(self, floating_ip):
return self._rule_priorities.allocate(floating_ip)
def deallocate_rule_priority(self, rule_pr):
self._rule_priorities.add(rule_pr)
def deallocate_rule_priority(self, floating_ip):
self._rule_priorities.release(floating_ip)
def _gateway_added(self, ex_gw_port, interface_name):
"""Add Floating IP gateway port."""
LOG.debug("add gateway interface(%s)", interface_name)
ns_name = self.get_name()
self.driver.plug(ex_gw_port['network_id'],
ex_gw_port['id'],
@ -126,6 +130,7 @@ class FipNamespace(namespaces.Namespace):
def create(self):
# TODO(Carl) Get this functionality from mlavelle's namespace baseclass
LOG.debug("add fip-namespace(%s)", self.name)
ip_wrapper_root = ip_lib.IPWrapper()
ip_wrapper_root.netns.execute(['sysctl',
'-w',
@ -172,7 +177,6 @@ class FipNamespace(namespaces.Namespace):
"""
self.agent_gateway_port = agent_gateway_port
# add fip-namespace and agent_gateway_port
self.create()
iface_name = self.get_ext_device_name(agent_gateway_port['id'])
@ -186,6 +190,7 @@ class FipNamespace(namespaces.Namespace):
def create_rtr_2_fip_link(self, ri):
"""Create interface between router and Floating IP namespace."""
LOG.debug("Create FIP link interfaces for router %s", ri.router_id)
rtr_2_fip_name = self.get_rtr_ext_device_name(ri.router_id)
fip_2_rtr_name = self.get_int_device_name(ri.router_id)
fip_ns_name = self.get_name()
@ -217,7 +222,7 @@ class FipNamespace(namespaces.Namespace):
device = ip_lib.IPDevice(rtr_2_fip_name, namespace=ri.ns_name)
device.route.add_gateway(str(fip_2_rtr.ip), table=FIP_RT_TBL)
# set up the NAT rules and chains
ri._handle_fip_nat_rules(rtr_2_fip_name, 'add_rules')
ri._handle_fip_nat_rules(rtr_2_fip_name)
def scan_fip_ports(self, ri):
# don't scan if not dvr or count is not None
@ -232,4 +237,8 @@ class FipNamespace(namespaces.Namespace):
existing_cidrs = [addr['cidr'] for addr in device.addr.list()]
fip_cidrs = [c for c in existing_cidrs if
common_utils.is_cidr_host(c)]
for fip_cidr in fip_cidrs:
fip_ip = fip_cidr.split('/')[0]
rule_pr = self._rule_priorities.allocate(fip_ip)
ri.floating_ips_dict[fip_ip] = rule_pr
ri.dist_fip_count = len(fip_cidrs)

View File

@ -47,7 +47,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
floating_ips = super(DvrLocalRouter, self).get_floating_ips()
return [i for i in floating_ips if i['host'] == self.host]
def _handle_fip_nat_rules(self, interface_name, action):
def _handle_fip_nat_rules(self, interface_name):
"""Configures NAT rules for Floating IPs for DVR.
Remove all the rules. This is safe because if
@ -61,20 +61,20 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
# Add back the jump to float-snat
self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
# And add them back if the action is add_rules
if action == 'add_rules' and interface_name:
rule = ('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name})
self.iptables_manager.ipv4['nat'].add_rule(*rule)
# And add the NAT rule back
rule = ('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name})
self.iptables_manager.ipv4['nat'].add_rule(*rule)
self.iptables_manager.apply()
def floating_ip_added_dist(self, fip, fip_cidr):
"""Add floating IP to FIP namespace."""
floating_ip = fip['floating_ip_address']
fixed_ip = fip['fixed_ip_address']
rule_pr = self.fip_ns.allocate_rule_priority()
rule_pr = self.fip_ns.allocate_rule_priority(floating_ip)
self.floating_ips_dict[floating_ip] = rule_pr
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
ip_rule = ip_lib.IPRule(namespace=self.ns_name)
@ -113,7 +113,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
ip_rule.rule.delete(ip=floating_ip,
table=dvr_fip_ns.FIP_RT_TBL,
priority=rule_pr)
self.fip_ns.deallocate_rule_priority(rule_pr)
self.fip_ns.deallocate_rule_priority(floating_ip)
#TODO(rajeev): Handle else case - exception/log?
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
@ -265,7 +265,8 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
if is_add:
exc = _LE('DVR: error adding redirection logic')
else:
exc = _LE('DVR: removed snat failed')
exc = _LE('DVR: snat remove failed to clear the rule '
'and device')
LOG.exception(exc)
def _snat_redirect_add(self, gateway, sn_port, sn_int):
@ -373,8 +374,9 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
floating_ips = self.get_floating_ips()
fip_agent_port = self.get_floating_agent_gw_interface(
ex_gw_port['network_id'])
LOG.debug("FloatingIP agent gateway port received from the plugin: "
"%s", fip_agent_port)
if fip_agent_port:
LOG.debug("FloatingIP agent gateway port received from the "
"plugin: %s", fip_agent_port)
is_first = False
if floating_ips:
is_first = self.fip_ns.subscribe(self.router_id)

View File

@ -39,4 +39,8 @@ class DvrRouterBase(router.RouterInfo):
if match_port:
return match_port[0]
else:
LOG.error(_LE('DVR: no map match_port found!'))
LOG.error(_LE('DVR: SNAT port not found in the list '
'%(snat_list)s for the given router '
'internal port %(int_p)s'), {
'snat_list': snat_ports,
'int_p': int_port})

View File

@ -0,0 +1,53 @@
# Copyright 2015 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.agent.l3.item_allocator import ItemAllocator
class FipPriority(object):
def __init__(self, index):
self.index = index
def __repr__(self):
return str(self.index)
def __hash__(self):
return hash(self.__repr__())
def __eq__(self, other):
if isinstance(other, FipPriority):
return (self.index == other.index)
else:
return False
class FipRulePriorityAllocator(ItemAllocator):
"""Manages allocation of floating ips rule priorities.
IP rule priorities assigned to DVR floating IPs need
to be preserved over L3 agent restarts.
This class provides an allocator which saves the prirorities
to a datastore which will survive L3 agent restarts.
"""
def __init__(self, data_store_path, priority_rule_start,
priority_rule_end):
"""Create the necessary pool and create the item allocator
using ',' as the delimiter and FipPriority as the
class type
"""
pool = set(FipPriority(str(s)) for s in range(priority_rule_start,
priority_rule_end))
super(FipRulePriorityAllocator, self).__init__(data_store_path,
FipPriority,
pool)
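A usage sketch; the state file path and priority window are illustrative (the
FIP namespace derives its own from agent_conf.state_path and the
FIP_PR_START/FIP_PR_END constants), and allocate()/release() come from
ItemAllocator as used by the FIP namespace code above:

from neutron.agent.l3 import fip_rule_priority_allocator as frpa

# Hypothetical datastore path and priority bounds.
allocator = frpa.FipRulePriorityAllocator('/tmp/fip-priorities', 1000, 1100)
rule_pr = allocator.allocate('203.0.113.10')   # keyed by floating IP
print(rule_pr)                                 # a FipPriority, e.g. 1042
allocator.release('203.0.113.10')              # frees the priority for reuse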

View File

@ -333,6 +333,16 @@ class HaRouter(router.RouterInfo):
self.ha_state = state
callback(self.router_id, state)
@staticmethod
def _gateway_ports_equal(port1, port2):
def _get_filtered_dict(d, ignore):
return {k: v for k, v in d.items() if k not in ignore}
keys_to_ignore = set(['binding:host_id'])
port1_filtered = _get_filtered_dict(port1, keys_to_ignore)
port2_filtered = _get_filtered_dict(port2, keys_to_ignore)
return port1_filtered == port2_filtered
def external_gateway_added(self, ex_gw_port, interface_name):
self._plug_external_gateway(ex_gw_port, interface_name, self.ns_name)
self._add_gateway_vip(ex_gw_port, interface_name)

View File

@ -15,7 +15,6 @@
import netaddr
from oslo_log import log as logging
import six
from neutron.agent.l3 import namespaces
from neutron.agent.linux import ip_lib
@ -23,6 +22,7 @@ from neutron.agent.linux import iptables_manager
from neutron.agent.linux import ra
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.common import utils as common_utils
from neutron.i18n import _LW
@ -110,12 +110,17 @@ class RouterInfo(object):
def get_external_device_interface_name(self, ex_gw_port):
return self.get_external_device_name(ex_gw_port['id'])
def _update_routing_table(self, operation, route):
def _update_routing_table(self, operation, route, namespace):
cmd = ['ip', 'route', operation, 'to', route['destination'],
'via', route['nexthop']]
ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name)
ip_wrapper = ip_lib.IPWrapper(namespace=namespace)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
def update_routing_table(self, operation, route, namespace=None):
if namespace is None:
namespace = self.ns_name
self._update_routing_table(operation, route, namespace)
def routes_updated(self):
new_routes = self.router['routes']
@ -129,10 +134,10 @@ class RouterInfo(object):
if route['destination'] == del_route['destination']:
removes.remove(del_route)
# replace succeeds even if there is no existing route
self._update_routing_table('replace', route)
self.update_routing_table('replace', route)
for route in removes:
LOG.debug("Removed route entry is '%s'", route)
self._update_routing_table('delete', route)
self.update_routing_table('delete', route)
self.routes = new_routes
def get_ex_gw_port(self):
@ -239,6 +244,8 @@ class RouterInfo(object):
ip_cidr for ip_cidr in existing_cidrs - new_cidrs
if common_utils.is_cidr_host(ip_cidr))
for ip_cidr in fips_to_remove:
LOG.debug("Removing floating ip %s from interface %s in "
"namespace %s", ip_cidr, interface_name, self.ns_name)
self.remove_floating_ip(device, ip_cidr)
return fip_statuses
@ -266,9 +273,28 @@ class RouterInfo(object):
if self.router_namespace:
self.router_namespace.delete()
def _internal_network_updated(self, port, subnet_id, prefix, old_prefix,
updated_cidrs):
interface_name = self.get_internal_device_name(port['id'])
if prefix != l3_constants.PROVISIONAL_IPV6_PD_PREFIX:
fixed_ips = port['fixed_ips']
for fixed_ip in fixed_ips:
if fixed_ip['subnet_id'] == subnet_id:
v6addr = common_utils.ip_to_cidr(fixed_ip['ip_address'],
fixed_ip.get('prefixlen'))
if v6addr not in updated_cidrs:
self.driver.add_ipv6_addr(interface_name, v6addr,
self.ns_name)
else:
self.driver.delete_ipv6_addr_with_prefix(interface_name,
old_prefix,
self.ns_name)
def _internal_network_added(self, ns_name, network_id, port_id,
fixed_ips, mac_address,
interface_name, prefix):
LOG.debug("adding internal network: prefix(%s), port(%s)",
prefix, port_id)
self.driver.plug(network_id, port_id, interface_name, mac_address,
namespace=ns_name,
prefix=prefix)
@ -300,7 +326,8 @@ class RouterInfo(object):
def internal_network_removed(self, port):
interface_name = self.get_internal_device_name(port['id'])
LOG.debug("removing internal network: port(%s) interface(%s)",
port['id'], interface_name)
if ip_lib.device_exists(interface_name, namespace=self.ns_name):
self.driver.unplug(interface_name, namespace=self.ns_name,
prefix=INTERNAL_DEV_PREFIX)
@ -326,7 +353,8 @@ class RouterInfo(object):
def _port_has_ipv6_subnet(port):
if 'subnets' in port:
for subnet in port['subnets']:
if netaddr.IPNetwork(subnet['cidr']).version == 6:
if (netaddr.IPNetwork(subnet['cidr']).version == 6 and
subnet['cidr'] != l3_constants.PROVISIONAL_IPV6_PD_PREFIX):
return True
def enable_radvd(self, internal_ports=None):
@ -344,7 +372,7 @@ class RouterInfo(object):
self.driver.init_l3(interface_name, ip_cidrs=ip_cidrs,
namespace=self.ns_name)
def _process_internal_ports(self):
def _process_internal_ports(self, pd):
existing_port_ids = set(p['id'] for p in self.internal_ports)
internal_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
@ -361,14 +389,26 @@ class RouterInfo(object):
enable_ra = False
for p in new_ports:
self.internal_network_added(p)
LOG.debug("appending port %s to internal_ports cache", p)
self.internal_ports.append(p)
enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
for subnet in p['subnets']:
if ipv6_utils.is_ipv6_pd_enabled(subnet):
interface_name = self.get_internal_device_name(p['id'])
pd.enable_subnet(self.router_id, subnet['id'],
subnet['cidr'],
interface_name, p['mac_address'])
for p in old_ports:
self.internal_network_removed(p)
LOG.debug("removing port %s from internal_ports cache", p)
self.internal_ports.remove(p)
enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
for subnet in p['subnets']:
if ipv6_utils.is_ipv6_pd_enabled(subnet):
pd.disable_subnet(self.router_id, subnet['id'])
updated_cidrs = []
if updated_ports:
for index, p in enumerate(internal_ports):
if not updated_ports.get(p['id']):
@ -376,9 +416,26 @@ class RouterInfo(object):
self.internal_ports[index] = updated_ports[p['id']]
interface_name = self.get_internal_device_name(p['id'])
ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips'])
LOG.debug("updating internal network for port %s", p)
updated_cidrs += ip_cidrs
self.internal_network_updated(interface_name, ip_cidrs)
enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
# Check if there is any pd prefix update
for p in internal_ports:
if p['id'] in (set(current_port_ids) & set(existing_port_ids)):
for subnet in p.get('subnets', []):
if ipv6_utils.is_ipv6_pd_enabled(subnet):
old_prefix = pd.update_subnet(self.router_id,
subnet['id'],
subnet['cidr'])
if old_prefix:
self._internal_network_updated(p, subnet['id'],
subnet['cidr'],
old_prefix,
updated_cidrs)
enable_ra = True
# Enable RA
if enable_ra:
self.enable_radvd(internal_ports)
@ -392,6 +449,7 @@ class RouterInfo(object):
for stale_dev in stale_devs:
LOG.debug('Deleting stale internal router device: %s',
stale_dev)
pd.remove_stale_ri_ifname(self.router_id, stale_dev)
self.driver.unplug(stale_dev,
namespace=self.ns_name,
prefix=INTERNAL_DEV_PREFIX)
@ -433,6 +491,8 @@ class RouterInfo(object):
def _external_gateway_added(self, ex_gw_port, interface_name,
ns_name, preserve_ips):
LOG.debug("External gateway added: port(%s), interface(%s), ns(%s)",
ex_gw_port, interface_name, ns_name)
self._plug_external_gateway(ex_gw_port, interface_name, ns_name)
# Build up the interface and gateway IP addresses that
@ -474,12 +534,18 @@ class RouterInfo(object):
ex_gw_port, interface_name, self.ns_name, preserve_ips)
def external_gateway_removed(self, ex_gw_port, interface_name):
LOG.debug("External gateway removed: port(%s), interface(%s)",
ex_gw_port, interface_name)
self.driver.unplug(interface_name,
bridge=self.agent_conf.external_network_bridge,
namespace=self.ns_name,
prefix=EXTERNAL_DEV_PREFIX)
def _process_external_gateway(self, ex_gw_port):
@staticmethod
def _gateway_ports_equal(port1, port2):
return port1 == port2
def _process_external_gateway(self, ex_gw_port, pd):
# TODO(Carl) Refactor to clarify roles of ex_gw_port vs self.ex_gw_port
ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or
self.ex_gw_port and self.ex_gw_port['id'])
@ -488,22 +554,14 @@ class RouterInfo(object):
if ex_gw_port_id:
interface_name = self.get_external_device_name(ex_gw_port_id)
if ex_gw_port:
def _gateway_ports_equal(port1, port2):
def _get_filtered_dict(d, ignore):
return dict((k, v) for k, v in six.iteritems(d)
if k not in ignore)
keys_to_ignore = set(['binding:host_id'])
port1_filtered = _get_filtered_dict(port1, keys_to_ignore)
port2_filtered = _get_filtered_dict(port2, keys_to_ignore)
return port1_filtered == port2_filtered
if not self.ex_gw_port:
self.external_gateway_added(ex_gw_port, interface_name)
elif not _gateway_ports_equal(ex_gw_port, self.ex_gw_port):
pd.add_gw_interface(self.router['id'], interface_name)
elif not self._gateway_ports_equal(ex_gw_port, self.ex_gw_port):
self.external_gateway_updated(ex_gw_port, interface_name)
elif not ex_gw_port and self.ex_gw_port:
self.external_gateway_removed(self.ex_gw_port, interface_name)
pd.remove_gw_interface(self.router['id'])
existing_devices = self._get_existing_devices()
stale_devs = [dev for dev in existing_devices
@ -511,6 +569,7 @@ class RouterInfo(object):
and dev != interface_name]
for stale_dev in stale_devs:
LOG.debug('Deleting stale external router device: %s', stale_dev)
pd.remove_gw_interface(self.router['id'])
self.driver.unplug(stale_dev,
bridge=self.agent_conf.external_network_bridge,
namespace=self.ns_name,
@ -587,7 +646,7 @@ class RouterInfo(object):
try:
with self.iptables_manager.defer_apply():
ex_gw_port = self.get_ex_gw_port()
self._process_external_gateway(ex_gw_port)
self._process_external_gateway(ex_gw_port, agent.pd)
if not ex_gw_port:
return
@ -618,7 +677,9 @@ class RouterInfo(object):
:param agent: Passes the agent in order to send RPC messages.
"""
self._process_internal_ports()
LOG.debug("process router updates")
self._process_internal_ports(agent.pd)
agent.pd.sync_router(self.router['id'])
self.process_external(agent)
# Process static routes for router
self.routes_updated()

View File

@ -21,7 +21,9 @@ from oslo_utils import timeutils
# Lower value is higher priority
PRIORITY_RPC = 0
PRIORITY_SYNC_ROUTERS_TASK = 1
PRIORITY_PD_UPDATE = 2
DELETE_ROUTER = 1
PD_UPDATE = 2
class RouterUpdate(object):

View File

@ -510,6 +510,11 @@ class Dnsmasq(DhcpLocalProcess):
for port in self.network.ports:
fixed_ips = self._sort_fixed_ips_for_dnsmasq(port.fixed_ips,
v6_nets)
# Confirm whether the Neutron server supports the dns_name
# attribute in the ports API
dns_assignment = getattr(port, 'dns_assignment', None)
if dns_assignment:
dns_ip_map = {d.ip_address: d for d in dns_assignment}
for alloc in fixed_ips:
# Note(scollins) Only create entries that are
# associated with the subnet being managed by this
@ -523,11 +528,18 @@ class Dnsmasq(DhcpLocalProcess):
yield (port, alloc, hostname, fqdn)
continue
hostname = 'host-%s' % alloc.ip_address.replace(
'.', '-').replace(':', '-')
fqdn = hostname
if self.conf.dhcp_domain:
fqdn = '%s.%s' % (fqdn, self.conf.dhcp_domain)
# If dns_name attribute is supported by ports API, return the
# dns_assignment generated by the Neutron server. Otherwise,
# generate hostname and fqdn locally (previous behaviour)
if dns_assignment:
hostname = dns_ip_map[alloc.ip_address].hostname
fqdn = dns_ip_map[alloc.ip_address].fqdn
else:
hostname = 'host-%s' % alloc.ip_address.replace(
'.', '-').replace(':', '-')
fqdn = hostname
if self.conf.dhcp_domain:
fqdn = '%s.%s' % (fqdn, self.conf.dhcp_domain)
yield (port, alloc, hostname, fqdn)
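As a note on the fallback branch above: when the server supplies no dns_assignment, the hostname is derived purely from the allocated IP address. A minimal sketch of that derivation (the dhcp_domain value 'openstacklocal' is an assumption for illustration):

ip_address = '2001:db8::5'
# Dots and colons are both mapped to dashes to form a valid hostname.
hostname = 'host-%s' % ip_address.replace('.', '-').replace(':', '-')
assert hostname == 'host-2001-db8--5'
fqdn = '%s.%s' % (hostname, 'openstacklocal')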
def _get_port_extra_dhcp_opts(self, port):
@ -761,9 +773,10 @@ class Dnsmasq(DhcpLocalProcess):
# Add host routes for isolated network segments
if (isolated_subnets[subnet.id] and
if (self.conf.force_metadata or
(isolated_subnets[subnet.id] and
self.conf.enable_isolated_metadata and
subnet.ip_version == 4):
subnet.ip_version == 4)):
subnet_dhcp_ip = subnet_to_interface_ip[subnet.id]
host_routes.append(
'%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip)
@ -900,7 +913,7 @@ class Dnsmasq(DhcpLocalProcess):
A subnet is considered non-isolated if there is a port connected to
the subnet, and the port's ip address matches that of the subnet's
gateway. The port must be owned by a nuetron router.
gateway. The port must be owned by a neutron router.
"""
isolated_subnets = collections.defaultdict(lambda: True)
subnets = dict((subnet.id, subnet) for subnet in network.subnets)
@ -919,7 +932,8 @@ class Dnsmasq(DhcpLocalProcess):
"""Determine whether the metadata proxy is needed for a network
This method returns True for truly isolated networks (ie: not attached
to a router), when the enable_isolated_metadata flag is True.
to a router) when enable_isolated_metadata is True, or for all
networks when the force_metadata flag is True.
This method also returns True when enable_metadata_network is True,
and the network passed as a parameter has a subnet in the link-local
@ -928,6 +942,9 @@ class Dnsmasq(DhcpLocalProcess):
providing access to the metadata service via logical routers built
with 3rd party backends.
"""
if conf.force_metadata:
return True
if conf.enable_metadata_network and conf.enable_isolated_metadata:
# check if the network has a metadata subnet
meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_CIDR)
@ -996,77 +1013,111 @@ class DeviceManager(object):
device.route.delete_gateway(gateway)
def setup_dhcp_port(self, network):
"""Create/update DHCP port for the host if needed and return port."""
def _setup_existing_dhcp_port(self, network, device_id, dhcp_subnets):
"""Set up the existing DHCP port, if there is one."""
device_id = self.get_device_id(network)
subnets = {subnet.id: subnet for subnet in network.subnets
if subnet.enable_dhcp}
# To avoid pylint thinking that port might be undefined after
# the following loop...
port = None
dhcp_port = None
# Look for an existing DHCP for this network.
for port in network.ports:
port_device_id = getattr(port, 'device_id', None)
if port_device_id == device_id:
dhcp_enabled_subnet_ids = set(subnets)
port_fixed_ips = []
for fixed_ip in port.fixed_ips:
if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
port_fixed_ips.append(
{'subnet_id': fixed_ip.subnet_id,
'ip_address': fixed_ip.ip_address})
port_subnet_ids = set(ip.subnet_id for ip in port.fixed_ips)
# If there is a new dhcp enabled subnet or a port that is no
# longer on a dhcp enabled subnet, we need to call update.
if dhcp_enabled_subnet_ids != port_subnet_ids:
port_fixed_ips.extend(
dict(subnet_id=s)
for s in dhcp_enabled_subnet_ids - port_subnet_ids)
dhcp_port = self.plugin.update_dhcp_port(
port.id, {'port': {'network_id': network.id,
'fixed_ips': port_fixed_ips}})
if not dhcp_port:
raise exceptions.Conflict()
else:
dhcp_port = port
# break since we found port that matches device_id
break
else:
return None
# check for a reserved DHCP port
if dhcp_port is None:
LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist. Checking for a reserved port.',
{'device_id': device_id, 'network_id': network.id})
for port in network.ports:
port_device_id = getattr(port, 'device_id', None)
if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT:
dhcp_port = self.plugin.update_dhcp_port(
port.id, {'port': {'network_id': network.id,
'device_id': device_id}})
if dhcp_port:
break
# Compare what the subnets should be against what is already
# on the port.
dhcp_enabled_subnet_ids = set(dhcp_subnets)
port_subnet_ids = set(ip.subnet_id for ip in port.fixed_ips)
# DHCP port has not yet been created.
if dhcp_port is None:
LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist.', {'device_id': device_id,
'network_id': network.id})
port_dict = dict(
name='',
admin_state_up=True,
device_id=device_id,
network_id=network.id,
tenant_id=network.tenant_id,
fixed_ips=[dict(subnet_id=s) for s in subnets])
dhcp_port = self.plugin.create_dhcp_port({'port': port_dict})
# If those differ, we need to call update.
if dhcp_enabled_subnet_ids != port_subnet_ids:
# Collect the subnets and fixed IPs that the port already
# has, for subnets that are still in the DHCP-enabled set.
wanted_fixed_ips = []
for fixed_ip in port.fixed_ips:
if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
wanted_fixed_ips.append(
{'subnet_id': fixed_ip.subnet_id,
'ip_address': fixed_ip.ip_address})
if not dhcp_port:
# Add subnet IDs for new DHCP-enabled subnets.
wanted_fixed_ips.extend(
dict(subnet_id=s)
for s in dhcp_enabled_subnet_ids - port_subnet_ids)
# Update the port to have the calculated subnets and fixed
# IPs. The Neutron server will allocate a fresh IP for
# each subnet that doesn't already have one.
port = self.plugin.update_dhcp_port(
port.id,
{'port': {'network_id': network.id,
'fixed_ips': wanted_fixed_ips}})
if not port:
raise exceptions.Conflict()
return port
def _setup_reserved_dhcp_port(self, network, device_id, dhcp_subnets):
"""Setup the reserved DHCP port, if there is one."""
LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist. Checking for a reserved port.',
{'device_id': device_id, 'network_id': network.id})
for port in network.ports:
port_device_id = getattr(port, 'device_id', None)
if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT:
port = self.plugin.update_dhcp_port(
port.id, {'port': {'network_id': network.id,
'device_id': device_id}})
if port:
return port
def _setup_new_dhcp_port(self, network, device_id, dhcp_subnets):
"""Create and set up new DHCP port for the specified network."""
LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist. Creating new one.',
{'device_id': device_id, 'network_id': network.id})
port_dict = dict(
name='',
admin_state_up=True,
device_id=device_id,
network_id=network.id,
tenant_id=network.tenant_id,
fixed_ips=[dict(subnet_id=s) for s in dhcp_subnets])
return self.plugin.create_dhcp_port({'port': port_dict})
def setup_dhcp_port(self, network):
"""Create/update DHCP port for the host if needed and return port."""
# The ID that the DHCP port will have (or already has).
device_id = self.get_device_id(network)
# Get the set of DHCP-enabled subnets on this network.
dhcp_subnets = {subnet.id: subnet for subnet in network.subnets
if subnet.enable_dhcp}
# There are 3 cases: either the DHCP port already exists (but
# might need to be updated for a changed set of subnets); or
# some other code has already prepared a 'reserved' DHCP port,
# and we just need to adopt that; or we need to create a new
# DHCP port. Try each of those in turn until we have a DHCP
# port.
for setup_method in (self._setup_existing_dhcp_port,
self._setup_reserved_dhcp_port,
self._setup_new_dhcp_port):
dhcp_port = setup_method(network, device_id, dhcp_subnets)
if dhcp_port:
break
else:
raise exceptions.Conflict()
# Convert subnet_id to subnet dict
fixed_ips = [dict(subnet_id=fixed_ip.subnet_id,
ip_address=fixed_ip.ip_address,
subnet=subnets[fixed_ip.subnet_id])
subnet=dhcp_subnets[fixed_ip.subnet_id])
for fixed_ip in dhcp_port.fixed_ips]
ips = [DictModel(item) if isinstance(item, dict) else item

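The rewritten setup_dhcp_port above leans on Python's for/else: each setup method is tried in turn, and the else branch fires only if no method produced a port. A self-contained sketch of the same pattern (names hypothetical):

def first_port(strategies):
    # Try each strategy in order; for/else raises only when the loop
    # finishes without any strategy returning a port.
    for strategy in strategies:
        port = strategy()
        if port:
            break
    else:
        raise RuntimeError('no strategy produced a DHCP port')
    return port

assert first_port([lambda: None, lambda: {'id': 'dhcp'}]) == {'id': 'dhcp'}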
View File

@ -0,0 +1,181 @@
# Copyright 2015 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jinja2
import os
from oslo_config import cfg
import shutil
import six
from neutron.agent.linux import external_process
from neutron.agent.linux import pd
from neutron.agent.linux import pd_driver
from neutron.agent.linux import utils
from neutron.common import constants
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
PD_SERVICE_NAME = 'dibbler'
CONFIG_TEMPLATE = jinja2.Template("""
# Config for dibbler-client.
# Use enterprise number based duid
duid-type duid-en {{ enterprise_number }} {{ va_id }}
# 8 (Debug) is most verbose. 7 (Info) is usually the best option
log-level 8
# No automatic downlink address assignment
downlink-prefix-ifaces "none"
# Use script to notify l3_agent of assigned prefix
script {{ script_path }}
# Ask for prefix over the external gateway interface
iface {{ interface_name }} {
# Bind to generated LLA
bind-to-address {{ bind_address }}
# ask for one delegated prefix
pd 1
}
""")
# The first line must be #!/usr/bin/env bash
SCRIPT_TEMPLATE = jinja2.Template("""#!/usr/bin/env bash
exec neutron-pd-notify $1 {{ prefix_path }} {{ l3_agent_pid }}
""")
class PDDibbler(pd_driver.PDDriverBase):
def __init__(self, router_id, subnet_id, ri_ifname):
super(PDDibbler, self).__init__(router_id, subnet_id, ri_ifname)
self.requestor_id = "%s:%s:%s" % (self.router_id,
self.subnet_id,
self.ri_ifname)
self.dibbler_client_working_area = "%s/%s" % (cfg.CONF.pd_confs,
self.requestor_id)
self.prefix_path = "%s/prefix" % self.dibbler_client_working_area
self.pid_path = "%s/client.pid" % self.dibbler_client_working_area
self.converted_subnet_id = self.subnet_id.replace('-', '')
def _is_dibbler_client_running(self):
return utils.get_value_from_file(self.pid_path)
def _generate_dibbler_conf(self, ex_gw_ifname, lla):
dcwa = self.dibbler_client_working_area
script_path = utils.get_conf_file_name(dcwa, 'notify', 'sh', True)
buf = six.StringIO()
buf.write('%s' % SCRIPT_TEMPLATE.render(
prefix_path=self.prefix_path,
l3_agent_pid=os.getpid()))
utils.replace_file(script_path, buf.getvalue())
os.chmod(script_path, 0o744)
dibbler_conf = utils.get_conf_file_name(dcwa, 'client', 'conf', False)
buf = six.StringIO()
buf.write('%s' % CONFIG_TEMPLATE.render(
enterprise_number=cfg.CONF.vendor_pen,
va_id='0x%s' % self.converted_subnet_id,
script_path='"%s/notify.sh"' % dcwa,
interface_name='"%s"' % ex_gw_ifname,
bind_address='%s' % lla))
utils.replace_file(dibbler_conf, buf.getvalue())
return dcwa
def _spawn_dibbler(self, pmon, router_ns, dibbler_conf):
def callback(pid_file):
dibbler_cmd = ['dibbler-client',
'start',
'-w', '%s' % dibbler_conf]
return dibbler_cmd
pm = external_process.ProcessManager(
uuid=self.requestor_id,
default_cmd_callback=callback,
namespace=router_ns,
service=PD_SERVICE_NAME,
conf=cfg.CONF,
pid_file=self.pid_path)
pm.enable(reload_cfg=False)
pmon.register(uuid=self.requestor_id,
service_name=PD_SERVICE_NAME,
monitored_process=pm)
def enable(self, pmon, router_ns, ex_gw_ifname, lla):
LOG.debug("Enable IPv6 PD for router %s subnet %s ri_ifname %s",
self.router_id, self.subnet_id, self.ri_ifname)
if not self._is_dibbler_client_running():
dibbler_conf = self._generate_dibbler_conf(ex_gw_ifname, lla)
self._spawn_dibbler(pmon, router_ns, dibbler_conf)
LOG.debug("dibbler client enabled for router %s subnet %s"
" ri_ifname %s",
self.router_id, self.subnet_id, self.ri_ifname)
def disable(self, pmon, router_ns):
LOG.debug("Disable IPv6 PD for router %s subnet %s ri_ifname %s",
self.router_id, self.subnet_id, self.ri_ifname)
dcwa = self.dibbler_client_working_area
def callback(pid_file):
dibbler_cmd = ['dibbler-client',
'stop',
'-w', '%s' % dcwa]
return dibbler_cmd
pmon.unregister(uuid=self.requestor_id,
service_name=PD_SERVICE_NAME)
pm = external_process.ProcessManager(
uuid=self.requestor_id,
namespace=router_ns,
service=PD_SERVICE_NAME,
conf=cfg.CONF,
pid_file=self.pid_path)
pm.disable(get_stop_command=callback)
shutil.rmtree(dcwa, ignore_errors=True)
LOG.debug("dibbler client disabled for router %s subnet %s "
"ri_ifname %s",
self.router_id, self.subnet_id, self.ri_ifname)
def get_prefix(self):
prefix = utils.get_value_from_file(self.prefix_path)
if not prefix:
prefix = constants.PROVISIONAL_IPV6_PD_PREFIX
return prefix
@staticmethod
def get_sync_data():
try:
requestor_ids = os.listdir(cfg.CONF.pd_confs)
except OSError:
return []
sync_data = []
requestors = (r.split(':') for r in requestor_ids if r.count(':') == 2)
for router_id, subnet_id, ri_ifname in requestors:
pd_info = pd.PDInfo()
pd_info.router_id = router_id
pd_info.subnet_id = subnet_id
pd_info.ri_ifname = ri_ifname
pd_info.driver = PDDibbler(router_id, subnet_id, ri_ifname)
pd_info.client_started = (
pd_info.driver._is_dibbler_client_running())
pd_info.prefix = pd_info.driver.get_prefix()
sync_data.append(pd_info)
return sync_data

View File

@ -96,15 +96,20 @@ class ProcessManager(MonitoredProcess):
def reload_cfg(self):
self.disable('HUP')
def disable(self, sig='9'):
def disable(self, sig='9', get_stop_command=None):
pid = self.pid
if self.active:
cmd = ['kill', '-%s' % (sig), pid]
utils.execute(cmd, run_as_root=True)
# In the case of shutting down, remove the pid file
if sig == '9':
fileutils.delete_if_exists(self.get_pid_file_name())
if get_stop_command:
cmd = get_stop_command(self.get_pid_file_name())
ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env)
else:
cmd = ['kill', '-%s' % (sig), pid]
utils.execute(cmd, run_as_root=True)
# In the case of shutting down, remove the pid file
if sig == '9':
fileutils.delete_if_exists(self.get_pid_file_name())
elif pid:
LOG.debug('Process for %(uuid)s pid %(pid)d is stale, ignoring '
'signal %(signal)s', {'uuid': self.uuid, 'pid': pid,

View File

@ -25,6 +25,7 @@ from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants as n_const
from neutron.common import exceptions
from neutron.common import ipv6_utils
from neutron.i18n import _LE, _LI
@ -51,6 +52,17 @@ class LinuxInterfaceDriver(object):
def __init__(self, conf):
self.conf = conf
if self.conf.network_device_mtu:
self._validate_network_device_mtu()
def _validate_network_device_mtu(self):
if (ipv6_utils.is_enabled() and
self.conf.network_device_mtu < n_const.IPV6_MIN_MTU):
LOG.error(_LE("IPv6 protocol requires a minimum MTU of "
"%(min_mtu)s, while the configured value is "
"%(current_mtu)s"), {'min_mtu': n_const.IPV6_MIN_MTU,
'current_mtu': self.conf.network_device_mtu})
raise SystemExit(1)
def init_l3(self, device_name, ip_cidrs, namespace=None,
preserve_ips=[], gateway_ips=None,
@ -116,6 +128,8 @@ class LinuxInterfaceDriver(object):
associated to removed ips
extra_subnets: An iterable of cidrs to add as routes without address
"""
LOG.debug("init_router_port: device_name(%s), namespace(%s)",
device_name, namespace)
self.init_l3(device_name=device_name,
ip_cidrs=ip_cidrs,
namespace=namespace,
@ -134,10 +148,41 @@ class LinuxInterfaceDriver(object):
device.route.list_onlink_routes(n_const.IP_VERSION_4) +
device.route.list_onlink_routes(n_const.IP_VERSION_6))
for route in new_onlink_routes - existing_onlink_routes:
LOG.debug("adding onlink route(%s)", route)
device.route.add_onlink_route(route)
for route in existing_onlink_routes - new_onlink_routes:
LOG.debug("deleting onlink route(%s)", route)
device.route.delete_onlink_route(route)
def add_ipv6_addr(self, device_name, v6addr, namespace, scope='global'):
device = ip_lib.IPDevice(device_name,
namespace=namespace)
net = netaddr.IPNetwork(v6addr)
device.addr.add(str(net), scope)
def delete_ipv6_addr(self, device_name, v6addr, namespace):
device = ip_lib.IPDevice(device_name,
namespace=namespace)
device.delete_addr_and_conntrack_state(v6addr)
def delete_ipv6_addr_with_prefix(self, device_name, prefix, namespace):
"""Delete the first listed IPv6 address that falls within a given
prefix.
"""
device = ip_lib.IPDevice(device_name, namespace=namespace)
net = netaddr.IPNetwork(prefix)
for address in device.addr.list(scope='global', filters=['permanent']):
ip_address = netaddr.IPNetwork(address['cidr'])
if ip_address in net:
device.delete_addr_and_conntrack_state(address['cidr'])
break
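The prefix match above relies on netaddr's containment test between networks; a quick illustration with assumed example addresses:

import netaddr

# A more specific network tests as a member of any containing prefix,
# which is how the first matching address is selected above.
prefix = netaddr.IPNetwork('2001:db8:1::/48')
assert netaddr.IPNetwork('2001:db8:1:2::10/64') in prefix
assert netaddr.IPNetwork('2001:db8:2::1/64') not in prefix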
def get_ipv6_llas(self, device_name, namespace):
device = ip_lib.IPDevice(device_name,
namespace=namespace)
return device.addr.list(scope='link', ip_version=6)
def check_bridge_exists(self, bridge):
if not ip_lib.device_exists(bridge):
raise exceptions.BridgeDoesNotExist(bridge=bridge)

View File

@ -23,7 +23,8 @@ LOG = logging.getLogger(__name__)
class IpConntrackManager(object):
"""Smart wrapper for ip conntrack."""
def __init__(self, execute=None, namespace=None):
def __init__(self, zone_lookup_func, execute=None, namespace=None):
self.get_device_zone = zone_lookup_func
self.execute = execute or linux_utils.execute
self.namespace = namespace
@ -48,9 +49,7 @@ class IpConntrackManager(object):
cmd = self._generate_conntrack_cmd_by_rule(rule, self.namespace)
ethertype = rule.get('ethertype')
for device_info in device_info_list:
zone_id = device_info.get('zone_id')
if not zone_id:
continue
zone_id = self.get_device_zone(device_info['device'])
ips = device_info.get('fixed_ips', [])
for ip in ips:
net = netaddr.IPNetwork(ip)

View File

@ -152,6 +152,11 @@ class IPWrapper(SubProcessBase):
"""Delete a virtual interface between two namespaces."""
self._as_root([], 'link', ('del', name))
def add_dummy(self, name):
"""Create a Linux dummy interface with the given name."""
self._as_root([], 'link', ('add', name, 'type', 'dummy'))
return IPDevice(name, namespace=self.namespace)
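Hypothetical usage of the new helper; the namespace name here is illustrative only:

from neutron.agent.linux import ip_lib

# Runs 'ip link add dummy0 type dummy' inside the namespace and
# returns an IPDevice wrapper for further configuration.
ip = ip_lib.IPWrapper(namespace='qrouter-example')
device = ip.add_dummy('dummy0')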
def ensure_namespace(self, name):
if not self.netns.exists(name):
ip = self.netns.add(name)

View File

@ -14,6 +14,8 @@
# under the License.
import collections
import re
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
@ -41,7 +43,10 @@ DIRECTION_IP_PREFIX = {firewall.INGRESS_DIRECTION: 'source_ip_prefix',
firewall.EGRESS_DIRECTION: 'dest_ip_prefix'}
IPSET_DIRECTION = {firewall.INGRESS_DIRECTION: 'src',
firewall.EGRESS_DIRECTION: 'dst'}
# length of all device prefixes (e.g. qvo, tap, qvb)
LINUX_DEV_PREFIX_LEN = 3
LINUX_DEV_LEN = 14
MAX_CONNTRACK_ZONES = 65535
comment_rule = iptables_manager.comment_rule
@ -57,7 +62,9 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
# TODO(majopela, shihanzhang): refactor out ipset to a separate
# driver composed over this one
self.ipset = ipset_manager.IpsetManager(namespace=namespace)
self.ipconntrack = ip_conntrack.IpConntrackManager(namespace=namespace)
self.ipconntrack = ip_conntrack.IpConntrackManager(
self.get_device_zone, namespace=namespace)
self._populate_initial_zone_map()
# list of port which has security group
self.filtered_ports = {}
self.unfiltered_ports = {}
@ -638,11 +645,10 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
filtered_ports)
for ip_version, remote_sg_ids in six.iteritems(remote_sgs_to_remove):
self._clear_sg_members(ip_version, remote_sg_ids)
if self.enable_ipset:
self._remove_ipsets_for_remote_sgs(ip_version, remote_sg_ids)
self._remove_unused_sg_members()
self._remove_sg_members(remote_sgs_to_remove)
# Remove unused security group rules
for remove_group_id in self._determine_sg_rules_to_remove(
@ -690,23 +696,17 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
port_group_ids.update(port.get('security_groups', []))
return port_group_ids
def _clear_sg_members(self, ip_version, remote_sg_ids):
"""Clear our internal cache of sg members matching the parameters."""
for remote_sg_id in remote_sg_ids:
if self.sg_members[remote_sg_id][ip_version]:
self.sg_members[remote_sg_id][ip_version] = []
def _remove_ipsets_for_remote_sgs(self, ip_version, remote_sg_ids):
"""Remove system ipsets matching the provided parameters."""
for remote_sg_id in remote_sg_ids:
self.ipset.destroy(remote_sg_id, ip_version)
def _remove_unused_sg_members(self):
"""Remove sg_member entries where no IPv4 or IPv6 is associated."""
for sg_id in list(self.sg_members.keys()):
sg_has_members = (self.sg_members[sg_id][constants.IPv4] or
self.sg_members[sg_id][constants.IPv6])
if not sg_has_members:
def _remove_sg_members(self, remote_sgs_to_remove):
"""Remove sg_member entries."""
ipv4_sec_group_set = remote_sgs_to_remove.get(constants.IPv4)
ipv6_sec_group_set = remote_sgs_to_remove.get(constants.IPv6)
for sg_id in (ipv4_sec_group_set & ipv6_sec_group_set):
if sg_id in self.sg_members:
del self.sg_members[sg_id]
def _find_deleted_sg_rules(self, sg_id):
@ -743,7 +743,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
sec_group_change = False
device_info = self.filtered_ports.get(device)
pre_device_info = self._pre_defer_filtered_ports.get(device)
if not (device_info or pre_device_info):
if not device_info or not pre_device_info:
continue
for sg_id in pre_device_info.get('security_groups', []):
if sg_id not in device_info.get('security_groups', []):
@ -795,6 +795,68 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
self._pre_defer_filtered_ports = None
self._pre_defer_unfiltered_ports = None
def _populate_initial_zone_map(self):
"""Setup the map between devices and zones based on current rules."""
self._device_zone_map = {}
rules = self.iptables.get_rules_for_table('raw')
for rule in rules:
match = re.match(r'.* --physdev-in (?P<dev>[a-zA-Z0-9\-]+)'
r'.* -j CT --zone (?P<zone>\d+).*', rule)
if match:
# strip off any prefix that the interface is using
short_port_id = match.group('dev')[LINUX_DEV_PREFIX_LEN:]
self._device_zone_map[short_port_id] = int(match.group('zone'))
LOG.debug("Populated conntrack zone map: %s", self._device_zone_map)
def get_device_zone(self, port_id):
# we have to key the device_zone_map on the fragment of the port
# UUID that shows up in the interface name. This is because the
# initial map is populated strictly from interface names, from
# which the full UUID cannot be recovered.
short_port_id = port_id[:(LINUX_DEV_LEN - LINUX_DEV_PREFIX_LEN)]
try:
return self._device_zone_map[short_port_id]
except KeyError:
self._free_zones_from_removed_ports()
return self._generate_device_zone(short_port_id)
def _free_zones_from_removed_ports(self):
"""Clears any entries from the zone map of removed ports."""
existing_ports = [
port['device'][:(LINUX_DEV_LEN - LINUX_DEV_PREFIX_LEN)]
for port in (list(self.filtered_ports.values()) +
list(self.unfiltered_ports.values()))
]
removed = set(self._device_zone_map) - set(existing_ports)
for dev in removed:
self._device_zone_map.pop(dev, None)
def _generate_device_zone(self, short_port_id):
"""Generates a unique conntrack zone for the passed in ID."""
zone = self._find_open_zone()
self._device_zone_map[short_port_id] = zone
LOG.debug("Assigned CT zone %(z)s to port %(dev)s.",
{'z': zone, 'dev': short_port_id})
return self._device_zone_map[short_port_id]
def _find_open_zone(self):
# call set to dedup because old ports may be mapped to the same zone.
zones_in_use = sorted(set(self._device_zone_map.values()))
if not zones_in_use:
return 1
# attempt to increment onto the highest used zone first. if we hit the
# end, go back and look for any gaps left by removed devices.
last = zones_in_use[-1]
if last < MAX_CONNTRACK_ZONES:
return last + 1
for index, used in enumerate(zones_in_use):
if used - index != 1:
# gap found, let's use it!
return index + 1
# conntrack zones exhausted :( :(
raise RuntimeError("iptables conntrack zones exhausted. "
"iptables rules cannot be applied.")
class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver):
OVS_HYBRID_TAP_PREFIX = constants.TAP_DEVICE_PREFIX
@ -815,20 +877,18 @@ class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver):
else:
device = self._get_device_name(port)
jump_rule = '-m physdev --physdev-in %s -j CT --zone %s' % (
device, port['zone_id'])
device, self.get_device_zone(port['device']))
return jump_rule
def _add_raw_chain_rules(self, port, direction):
if port['zone_id']:
jump_rule = self._get_jump_rule(port, direction)
self.iptables.ipv4['raw'].add_rule('PREROUTING', jump_rule)
self.iptables.ipv6['raw'].add_rule('PREROUTING', jump_rule)
jump_rule = self._get_jump_rule(port, direction)
self.iptables.ipv4['raw'].add_rule('PREROUTING', jump_rule)
self.iptables.ipv6['raw'].add_rule('PREROUTING', jump_rule)
def _remove_raw_chain_rules(self, port, direction):
if port['zone_id']:
jump_rule = self._get_jump_rule(port, direction)
self.iptables.ipv4['raw'].remove_rule('PREROUTING', jump_rule)
self.iptables.ipv6['raw'].remove_rule('PREROUTING', jump_rule)
jump_rule = self._get_jump_rule(port, direction)
self.iptables.ipv4['raw'].remove_rule('PREROUTING', jump_rule)
self.iptables.ipv6['raw'].remove_rule('PREROUTING', jump_rule)
def _add_chain(self, port, direction):
super(OVSHybridIptablesFirewallDriver, self)._add_chain(port,

View File

@ -426,6 +426,13 @@ class IptablesManager(object):
with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
return self._apply_synchronized()
def get_rules_for_table(self, table):
"""Runs iptables-save on a table and returns the results."""
args = ['iptables-save', '-t', table]
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
return self.execute(args, run_as_root=True).split('\n')
def _apply_synchronized(self):
"""Apply the current in-memory set of iptables rules.

356 neutron/agent/linux/pd.py Normal file
View File

@ -0,0 +1,356 @@
# Copyright 2015 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import functools
import signal
import six
from stevedore import driver
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.linux import utils as linux_utils
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as l3_constants
from neutron.common import ipv6_utils
from neutron.common import utils
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('pd_dhcp_driver',
default='dibbler',
help=_('Service to handle DHCPv6 Prefix delegation.')),
]
cfg.CONF.register_opts(OPTS)
class PrefixDelegation(object):
def __init__(self, context, pmon, intf_driver, notifier, pd_update_cb,
agent_conf):
self.context = context
self.pmon = pmon
self.intf_driver = intf_driver
self.notifier = notifier
self.routers = {}
self.pd_update_cb = pd_update_cb
self.agent_conf = agent_conf
self.pd_dhcp_driver = driver.DriverManager(
namespace='neutron.agent.linux.pd_drivers',
name=agent_conf.prefix_delegation_driver,
).driver
registry.subscribe(add_router,
resources.ROUTER,
events.BEFORE_CREATE)
registry.subscribe(remove_router,
resources.ROUTER,
events.AFTER_DELETE)
self._get_sync_data()
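The PD driver above is resolved through a stevedore entry point; roughly, the lookup for the default 'dibbler' driver goes as follows (a sketch, not taken verbatim from the agent):

from stevedore import driver

# Resolves the class registered under the given entry-point namespace;
# 'dibbler' matches the pd_dhcp_driver default above.
mgr = driver.DriverManager(namespace='neutron.agent.linux.pd_drivers',
                           name='dibbler')
pd_driver_cls = mgr.driver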
@utils.synchronized("l3-agent-pd")
def enable_subnet(self, router_id, subnet_id, prefix, ri_ifname, mac):
router = self.routers.get(router_id)
if router is None:
return
pd_info = router['subnets'].get(subnet_id)
if not pd_info:
pd_info = PDInfo(ri_ifname=ri_ifname, mac=mac)
router['subnets'][subnet_id] = pd_info
pd_info.bind_lla = self._get_lla(mac)
if pd_info.sync:
pd_info.mac = mac
pd_info.old_prefix = prefix
else:
self._add_lla(router, pd_info.get_bind_lla_with_mask())
def _delete_pd(self, router, pd_info):
self._delete_lla(router, pd_info.get_bind_lla_with_mask())
if pd_info.client_started:
pd_info.driver.disable(self.pmon, router['ns_name'])
@utils.synchronized("l3-agent-pd")
def disable_subnet(self, router_id, subnet_id):
prefix_update = {}
router = self.routers.get(router_id)
if not router:
return
pd_info = router['subnets'].get(subnet_id)
if not pd_info:
return
self._delete_pd(router, pd_info)
prefix_update[subnet_id] = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
del router['subnets'][subnet_id]
LOG.debug("Update server with prefixes: %s", prefix_update)
self.notifier(self.context, prefix_update)
@utils.synchronized("l3-agent-pd")
def update_subnet(self, router_id, subnet_id, prefix):
router = self.routers.get(router_id)
if router is not None:
pd_info = router['subnets'].get(subnet_id)
if pd_info and pd_info.old_prefix != prefix:
old_prefix = pd_info.old_prefix
pd_info.old_prefix = prefix
return old_prefix
@utils.synchronized("l3-agent-pd")
def add_gw_interface(self, router_id, gw_ifname):
router = self.routers.get(router_id)
prefix_update = {}
if not router:
return
router['gw_interface'] = gw_ifname
for subnet_id, pd_info in six.iteritems(router['subnets']):
# The gateway is added after the internal router ports.
# If a PD is being synced and its prefix is available, send an
# update when the prefix is out of sync; if not available,
# start the PD client.
bind_lla_with_mask = pd_info.get_bind_lla_with_mask()
if pd_info.sync:
pd_info.sync = False
if pd_info.client_started:
if pd_info.prefix != pd_info.old_prefix:
prefix_update[subnet_id] = pd_info.prefix
else:
self._delete_lla(router, bind_lla_with_mask)
self._add_lla(router, bind_lla_with_mask)
else:
self._add_lla(router, bind_lla_with_mask)
if prefix_update:
LOG.debug("Update server with prefixes: %s", prefix_update)
self.notifier(self.context, prefix_update)
def delete_router_pd(self, router):
prefix_update = {}
for subnet_id, pd_info in six.iteritems(router['subnets']):
self._delete_lla(router, pd_info.get_bind_lla_with_mask())
if pd_info.client_started:
pd_info.driver.disable(self.pmon, router['ns_name'])
pd_info.prefix = None
pd_info.client_started = False
prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
prefix_update[subnet_id] = prefix
if prefix_update:
LOG.debug("Update server with prefixes: %s", prefix_update)
self.notifier(self.context, prefix_update)
@utils.synchronized("l3-agent-pd")
def remove_gw_interface(self, router_id):
router = self.routers.get(router_id)
if router is not None:
router['gw_interface'] = None
self.delete_router_pd(router)
@utils.synchronized("l3-agent-pd")
def sync_router(self, router_id):
router = self.routers.get(router_id)
if router is not None and router['gw_interface'] is None:
self.delete_router_pd(router)
@utils.synchronized("l3-agent-pd")
def remove_stale_ri_ifname(self, router_id, stale_ifname):
router = self.routers.get(router_id)
if router is not None:
for subnet_id, pd_info in router['subnets'].items():
if pd_info.ri_ifname == stale_ifname:
self._delete_pd(router, pd_info)
del router['subnets'][subnet_id]
@staticmethod
def _get_lla(mac):
lla = ipv6_utils.get_ipv6_addr_by_EUI64(l3_constants.IPV6_LLA_PREFIX,
mac)
return lla
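_get_lla builds the link-local address via EUI-64 expansion of the port MAC; the equivalent computation with netaddr, for illustration with an assumed MAC:

import netaddr

# EUI-64: flip the universal/local bit of the first MAC octet, splice
# ff:fe into the middle, and merge with the fe80::/64 prefix.
lla = netaddr.EUI('00:16:3e:33:44:55').ipv6_link_local()
assert str(lla) == 'fe80::216:3eff:fe33:4455'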
def _get_llas(self, gw_ifname, ns_name):
try:
return self.intf_driver.get_ipv6_llas(gw_ifname, ns_name)
except RuntimeError:
# The error message was printed as part of the driver call
# This could happen if the gw_ifname was removed
# simply return and exit the thread
return
def _add_lla(self, router, lla_with_mask):
if router['gw_interface']:
self.intf_driver.add_ipv6_addr(router['gw_interface'],
lla_with_mask,
router['ns_name'],
'link')
# There is a delay before the LLA becomes active.
# This is because the kernel runs DAD to ensure LLA uniqueness.
# Spawn a thread to wait for the interface to be ready.
self._spawn_lla_thread(router['gw_interface'],
router['ns_name'],
lla_with_mask)
def _spawn_lla_thread(self, gw_ifname, ns_name, lla_with_mask):
eventlet.spawn_n(self._ensure_lla_task,
gw_ifname,
ns_name,
lla_with_mask)
def _delete_lla(self, router, lla_with_mask):
if lla_with_mask and router['gw_interface']:
try:
self.intf_driver.delete_ipv6_addr(router['gw_interface'],
lla_with_mask,
router['ns_name'])
except RuntimeError:
# Ignore error if the lla doesn't exist
pass
def _ensure_lla_task(self, gw_ifname, ns_name, lla_with_mask):
# The wait should only ever hit the timeout if the DAD test failed.
# In that case, the subnet would never be assigned a prefix.
linux_utils.wait_until_true(functools.partial(self._lla_available,
gw_ifname,
ns_name,
lla_with_mask),
timeout=l3_constants.LLA_TASK_TIMEOUT,
sleep=2)
def _lla_available(self, gw_ifname, ns_name, lla_with_mask):
llas = self._get_llas(gw_ifname, ns_name)
if self._is_lla_active(lla_with_mask, llas):
LOG.debug("LLA %s is active now" % lla_with_mask)
self.pd_update_cb()
return True
@staticmethod
def _is_lla_active(lla_with_mask, llas):
for lla in llas:
if lla_with_mask == lla['cidr']:
return not lla['tentative']
return False
@utils.synchronized("l3-agent-pd")
def process_prefix_update(self):
LOG.debug("Processing IPv6 PD Prefix Update")
prefix_update = {}
for router_id, router in six.iteritems(self.routers):
if not router['gw_interface']:
continue
llas = None
for subnet_id, pd_info in six.iteritems(router['subnets']):
if pd_info.client_started:
prefix = pd_info.driver.get_prefix()
if prefix != pd_info.prefix:
pd_info.prefix = prefix
prefix_update[subnet_id] = prefix
else:
if not llas:
llas = self._get_llas(router['gw_interface'],
router['ns_name'])
if self._is_lla_active(pd_info.get_bind_lla_with_mask(),
llas):
if not pd_info.driver:
pd_info.driver = self.pd_dhcp_driver(
router_id, subnet_id, pd_info.ri_ifname)
pd_info.driver.enable(self.pmon, router['ns_name'],
router['gw_interface'],
pd_info.bind_lla)
pd_info.client_started = True
if prefix_update:
LOG.debug("Update server with prefixes: %s", prefix_update)
self.notifier(self.context, prefix_update)
def after_start(self):
LOG.debug('SIGHUP signal handler set')
signal.signal(signal.SIGHUP, self._handle_sighup)
def _handle_sighup(self, signum, frame):
# The external DHCPv6 client uses SIGHUP to notify agent
# of prefix changes.
self.pd_update_cb()
def _get_sync_data(self):
sync_data = self.pd_dhcp_driver.get_sync_data()
for pd_info in sync_data:
router_id = pd_info.router_id
if not self.routers.get(router_id):
self.routers[router_id] = {'gw_interface': None,
'ns_name': None,
'subnets': {}}
new_pd_info = PDInfo(pd_info=pd_info)
subnets = self.routers[router_id]['subnets']
subnets[pd_info.subnet_id] = new_pd_info
@utils.synchronized("l3-agent-pd")
def remove_router(resource, event, l3_agent, **kwargs):
router_id = kwargs['router'].router_id
router = l3_agent.pd.routers.get(router_id)
l3_agent.pd.delete_router_pd(router)
del l3_agent.pd.routers[router_id]['subnets']
del l3_agent.pd.routers[router_id]
def get_router_entry(ns_name):
return {'gw_interface': None,
'ns_name': ns_name,
'subnets': {}}
@utils.synchronized("l3-agent-pd")
def add_router(resource, event, l3_agent, **kwargs):
added_router = kwargs['router']
router = l3_agent.pd.routers.get(added_router.router_id)
if not router:
l3_agent.pd.routers[added_router.router_id] = (
get_router_entry(added_router.ns_name))
else:
# This will happen during l3 agent restart
router['ns_name'] = added_router.ns_name
class PDInfo(object):
"""A class to simplify storing and passing of information relevant to
Prefix Delegation operations for a given subnet.
"""
def __init__(self, pd_info=None, ri_ifname=None, mac=None):
if pd_info is None:
self.prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
self.old_prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
self.ri_ifname = ri_ifname
self.mac = mac
self.bind_lla = None
self.sync = False
self.driver = None
self.client_started = False
else:
self.prefix = pd_info.prefix
self.old_prefix = None
self.ri_ifname = pd_info.ri_ifname
self.mac = None
self.bind_lla = None
self.sync = True
self.driver = pd_info.driver
self.client_started = pd_info.client_started
def get_bind_lla_with_mask(self):
bind_lla_with_mask = '%s/64' % self.bind_lla
return bind_lla_with_mask

View File

@ -0,0 +1,65 @@
# Copyright 2015 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from oslo_config import cfg
OPTS = [
cfg.StrOpt('pd_confs',
default='$state_path/pd',
help=_('Location to store IPv6 PD files.')),
cfg.StrOpt('vendor_pen',
default='8888',
help=_("A decimal value as Vendor's Registered Private "
"Enterprise Number as required by RFC3315 DUID-EN.")),
]
cfg.CONF.register_opts(OPTS)
@six.add_metaclass(abc.ABCMeta)
class PDDriverBase(object):
def __init__(self, router_id, subnet_id, ri_ifname):
self.router_id = router_id
self.subnet_id = subnet_id
self.ri_ifname = ri_ifname
@abc.abstractmethod
def enable(self, pmon, router_ns, ex_gw_ifname, lla):
"""Enable IPv6 Prefix Delegation for this PDDriver on the given
external interface, with the given link local address
"""
@abc.abstractmethod
def disable(self, pmon, router_ns):
"""Disable IPv6 Prefix Delegation for this PDDriver
"""
@abc.abstractmethod
def get_prefix(self):
"""Get the current assigned prefix for this PDDriver from the PD agent.
If no prefix is currently assigned, return
constants.PROVISIONAL_IPV6_PD_PREFIX
"""
@staticmethod
@abc.abstractmethod
def get_sync_data():
"""Get the latest router_id, subnet_id, and ri_ifname from the PD agent
so that the PDDriver can be kept up to date
"""

View File

@ -33,6 +33,7 @@ from oslo_log import log as logging
from oslo_log import loggers
from oslo_rootwrap import client
from oslo_utils import excutils
import six
from six.moves import http_client as httplib
from neutron.agent.common import config
@ -82,7 +83,6 @@ def create_process(cmd, run_as_root=False, addl_env=None):
cmd = list(map(str, addl_env_args(addl_env) + cmd))
if run_as_root:
cmd = shlex.split(config.get_root_helper(cfg.CONF)) + cmd
LOG.debug("Running command: %s", cmd)
obj = utils.subprocess_popen(cmd, shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
@ -98,7 +98,6 @@ def execute_rootwrap_daemon(cmd, process_input, addl_env):
# In practice, no neutron code should be trying to execute something that
# would throw those errors, and if it does it should be fixed as opposed to
# just logging the execution error.
LOG.debug("Running command (rootwrap daemon): %s", cmd)
client = RootwrapDaemonHelper.get_client()
return client.execute(cmd, process_input)
@ -107,29 +106,46 @@ def execute(cmd, process_input=None, addl_env=None,
check_exit_code=True, return_stderr=False, log_fail_as_error=True,
extra_ok_codes=None, run_as_root=False):
try:
if (process_input is None or
isinstance(process_input, six.binary_type)):
_process_input = process_input
else:
_process_input = process_input.encode('utf-8')
if run_as_root and cfg.CONF.AGENT.root_helper_daemon:
returncode, _stdout, _stderr = (
execute_rootwrap_daemon(cmd, process_input, addl_env))
else:
obj, cmd = create_process(cmd, run_as_root=run_as_root,
addl_env=addl_env)
_stdout, _stderr = obj.communicate(process_input)
_stdout, _stderr = obj.communicate(_process_input)
returncode = obj.returncode
obj.stdin.close()
if six.PY3:
if isinstance(_stdout, bytes):
try:
_stdout = _stdout.decode(encoding='utf-8')
except UnicodeError:
pass
if isinstance(_stderr, bytes):
try:
_stderr = _stderr.decode(encoding='utf-8')
except UnicodeError:
pass
m = _("\nCommand: {cmd}\nExit code: {code}\nStdin: {stdin}\n"
"Stdout: {stdout}\nStderr: {stderr}").format(
m = _("\nCommand: {cmd}\nExit code: {code}\n").format(
cmd=cmd,
code=returncode,
stdin=process_input or '',
stdout=_stdout,
stderr=_stderr)
code=returncode)
extra_ok_codes = extra_ok_codes or []
if returncode and returncode in extra_ok_codes:
returncode = None
if returncode and log_fail_as_error:
m += ("Stdin: {stdin}\n"
"Stdout: {stdout}\nStderr: {stderr}").format(
stdin=process_input or '',
stdout=_stdout,
stderr=_stderr)
LOG.error(m)
else:
LOG.debug(m)
@ -149,13 +165,15 @@ def get_interface_mac(interface):
MAC_START = 18
MAC_END = 24
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927,
struct.pack('256s', interface[:constants.DEVICE_NAME_MAX_LEN]))
dev = interface[:constants.DEVICE_NAME_MAX_LEN]
if isinstance(dev, six.text_type):
dev = dev.encode('utf-8')
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', dev))
return ''.join(['%02x:' % ord(char)
for char in info[MAC_START:MAC_END]])[:-1]
def replace_file(file_name, data):
def replace_file(file_name, data, file_mode=0o644):
"""Replaces the contents of file_name with data in a safe manner.
First write to a temp file and then rename. Since POSIX renames are
@ -168,7 +186,7 @@ def replace_file(file_name, data):
tmp_file = tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False)
tmp_file.write(data)
tmp_file.close()
os.chmod(tmp_file.name, 0o644)
os.chmod(tmp_file.name, file_mode)
os.rename(tmp_file.name, file_name)

View File

@ -269,9 +269,12 @@ class MetadataProxyHandler(object):
raise Exception(_('Unexpected response code: %s') % resp.status)
def _sign_instance_id(self, instance_id):
return hmac.new(self.conf.metadata_proxy_shared_secret,
instance_id,
hashlib.sha256).hexdigest()
secret = self.conf.metadata_proxy_shared_secret
if isinstance(secret, six.text_type):
secret = secret.encode('utf-8')
if isinstance(instance_id, six.text_type):
instance_id = instance_id.encode('utf-8')
return hmac.new(secret, instance_id, hashlib.sha256).hexdigest()
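The encodes above exist because hmac.new on Python 3 rejects text: both the key and the message must be bytes. A standalone sketch with assumed values:

import hashlib
import hmac

secret = u'shared-secret'.encode('utf-8')
instance_id = u'instance-id'.encode('utf-8')
signature = hmac.new(secret, instance_id, hashlib.sha256).hexdigest()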
class UnixDomainMetadataProxy(object):

View File

@ -92,7 +92,7 @@ class NetworkMetadataProxyHandler(object):
response = webob.Response()
response.status = resp.status
response.headers['Content-Type'] = resp['content-type']
response.body = content
response.body = wsgi.encode_body(content)
return response
elif resp.status == 400:
return webob.exc.HTTPBadRequest()

View File

@ -95,14 +95,16 @@ class API(object):
"""
@abc.abstractmethod
def add_br(self, name, may_exist=True):
def add_br(self, name, may_exist=True, datapath_type=None):
"""Create an command to add an OVS bridge
:param name: The name of the bridge
:type name: string
:param may_exist: Do not fail if bridge already exists
:type may_exist: bool
:returns: :class:`Command` with no result
:param name: The name of the bridge
:type name: string
:param may_exist: Do not fail if bridge already exists
:type may_exist: bool
:param datapath_type: The datapath_type of the bridge
:type datapath_type: string
:returns: :class:`Command` with no result
"""
@abc.abstractmethod
@ -161,6 +163,29 @@ class API(object):
:returns: :class:`Command` with field value result
"""
@abc.abstractmethod
def db_create(self, table, **col_values):
"""Create a command to create new record
:param table: The OVS table containing the record to be created
:type table: string
:param col_values: The columns and their associated values
to be set after create
:type col_values: Dictionary of column IDs and values
:returns: :class:`Command` with no result
"""
@abc.abstractmethod
def db_destroy(self, table, record):
"""Create a command to destroy a record
:param table: The OVS table containing the record to be destroyed
:type table: string
:param record: The record id (name/uuid) to be destroyed
:type record: uuid/string
:returns: :class:`Command` with no result
"""
@abc.abstractmethod
def db_set(self, table, record, *col_values):
"""Create a command to set fields in a record

View File

@ -144,8 +144,8 @@ class OvsdbIdl(api.API):
self.context.vsctl_timeout,
check_error, log_errors)
def add_br(self, name, may_exist=True):
return cmd.AddBridgeCommand(self, name, may_exist)
def add_br(self, name, may_exist=True, datapath_type=None):
return cmd.AddBridgeCommand(self, name, may_exist, datapath_type)
def del_br(self, name, if_exists=True):
return cmd.DelBridgeCommand(self, name, if_exists)
@ -168,6 +168,12 @@ class OvsdbIdl(api.API):
def br_set_external_id(self, name, field, value):
return cmd.BrSetExternalIdCommand(self, name, field, value)
def db_create(self, table, **col_values):
return cmd.DbCreateCommand(self, table, **col_values)
def db_destroy(self, table, record):
return cmd.DbDestroyCommand(self, table, record)
def db_set(self, table, record, *col_values):
return cmd.DbSetCommand(self, table, record, *col_values)

View File

@ -160,9 +160,13 @@ class OvsdbVsctl(ovsdb.API):
def transaction(self, check_error=False, log_errors=True, **kwargs):
return Transaction(self.context, check_error, log_errors, **kwargs)
def add_br(self, name, may_exist=True):
def add_br(self, name, may_exist=True, datapath_type=None):
opts = ['--may-exist'] if may_exist else None
return BaseCommand(self.context, 'add-br', opts, [name])
params = [name]
if datapath_type:
params += ['--', 'set', 'Bridge', name,
'datapath_type=%s' % datapath_type]
return BaseCommand(self.context, 'add-br', opts, params)
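With a datapath_type set, the vsctl variant chains a follow-up set command onto add-br via '--'. The assembled argument list looks roughly like this (a rendering inferred from the params above, not verified against a live ovs-vsctl):

# Hypothetical rendering of add_br('br-int', datapath_type='netdev'):
cmd = ['ovs-vsctl', '--may-exist', 'add-br', 'br-int',
       '--', 'set', 'Bridge', 'br-int', 'datapath_type=netdev']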
def del_br(self, name, if_exists=True):
opts = ['--if-exists'] if if_exists else None
@ -184,6 +188,15 @@ class OvsdbVsctl(ovsdb.API):
return BaseCommand(self.context, 'br-get-external-id',
args=[name, field])
def db_create(self, table, **col_values):
args = [table]
args += _set_colval_args(*col_values.items())
return BaseCommand(self.context, 'create', args=args)
def db_destroy(self, table, record):
args = [table, record]
return BaseCommand(self.context, 'destroy', args=args)
def db_set(self, table, record, *col_values):
args = [table, record]
args += _set_colval_args(*col_values)
@ -259,8 +272,11 @@ def _set_colval_args(*col_values):
col, k, op, ovsdb.py_to_val(v)) for k, v in val.items()]
elif (isinstance(val, collections.Sequence)
and not isinstance(val, six.string_types)):
args.append(
"%s%s%s" % (col, op, ",".join(map(ovsdb.py_to_val, val))))
if len(val) == 0:
args.append("%s%s%s" % (col, op, "[]"))
else:
args.append(
"%s%s%s" % (col, op, ",".join(map(ovsdb.py_to_val, val))))
else:
args.append("%s%s%s" % (col, op, ovsdb.py_to_val(val)))
return args
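The new empty-sequence branch keeps an empty list from rendering as a bare 'col=' token. A simplified sketch of the sequence case only (the full helper also handles maps and scalars):

def render_colval(col, val):
    # Simplified from _set_colval_args: plain sequences only.
    if isinstance(val, (list, tuple)):
        return '%s=%s' % (col, ','.join(map(str, val)) if val else '[]')
    return '%s=%s' % (col, val)

assert render_colval('protocols', []) == 'protocols=[]'
assert render_colval('protocols', ['OpenFlow13']) == 'protocols=OpenFlow13'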

View File

@ -50,10 +50,11 @@ class BaseCommand(api.Command):
class AddBridgeCommand(BaseCommand):
def __init__(self, api, name, may_exist):
def __init__(self, api, name, may_exist, datapath_type):
super(AddBridgeCommand, self).__init__(api)
self.name = name
self.may_exist = may_exist
self.datapath_type = datapath_type
def run_idl(self, txn):
if self.may_exist:
@ -63,6 +64,8 @@ class AddBridgeCommand(BaseCommand):
return
row = txn.insert(self.api._tables['Bridge'])
row.name = self.name
if self.datapath_type:
row.datapath_type = self.datapath_type
self.api._ovs.verify('bridges')
self.api._ovs.bridges = self.api._ovs.bridges + [row]
@ -148,6 +151,30 @@ class BrSetExternalIdCommand(BaseCommand):
br.external_ids = external_ids
class DbCreateCommand(BaseCommand):
def __init__(self, api, table, **columns):
super(DbCreateCommand, self).__init__(api)
self.table = table
self.columns = columns
def run_idl(self, txn):
row = txn.insert(self.api._tables[self.table])
for col, val in self.columns.items():
setattr(row, col, val)
self.result = row
class DbDestroyCommand(BaseCommand):
def __init__(self, api, table, record):
super(DbDestroyCommand, self).__init__(api)
self.table = table
self.record = record
def run_idl(self, txn):
record = idlutils.row_by_record(self.api.idl, self.table, self.record)
record.delete()
class DbSetCommand(BaseCommand):
def __init__(self, api, table, record, *col_values):
super(DbSetCommand, self).__init__(api)

View File

@ -110,23 +110,6 @@ class SecurityGroupAgentRpc(object):
self.global_refresh_firewall = False
self._use_enhanced_rpc = None
def set_local_zone(self, device):
"""Set local zone id for device
In order to separate conntrack in different networks, a local zone
id is needed to generate related iptables rules. This routine sets
zone id to device according to the network it belongs to. For OVS
agent, vlan id of each network can be used as zone id.
:param device: dictionary of device information, get network id by
device['network_id'], and set zone id by device['zone_id']
"""
net_id = device['network_id']
zone_id = None
if self.local_vlan_map and net_id in self.local_vlan_map:
zone_id = self.local_vlan_map[net_id].vlan
device['zone_id'] = zone_id
@property
def use_enhanced_rpc(self):
if self._use_enhanced_rpc is None:
@ -176,7 +159,6 @@ class SecurityGroupAgentRpc(object):
with self.firewall.defer_apply():
for device in devices.values():
self.set_local_zone(device)
self.firewall.prepare_port_filter(device)
if self.use_enhanced_rpc:
LOG.debug("Update security group information for ports %s",
@ -267,7 +249,6 @@ class SecurityGroupAgentRpc(object):
with self.firewall.defer_apply():
for device in devices.values():
LOG.debug("Update port filter for %s", device['device'])
self.set_local_zone(device)
self.firewall.update_port_filter(device)
if self.use_enhanced_rpc:
LOG.debug("Update security group information for ports %s",

View File

@ -18,6 +18,7 @@ import os
from eventlet.green import subprocess
from eventlet import greenthread
from oslo_log import log as logging
import six
from neutron.common import utils
@ -45,12 +46,29 @@ def create_process(cmd, addl_env=None):
def execute(cmd, process_input=None, addl_env=None,
check_exit_code=True, return_stderr=False, log_fail_as_error=True,
extra_ok_codes=None, run_as_root=False):
extra_ok_codes=None, run_as_root=False, do_decode=True):
try:
if (process_input is None or
isinstance(process_input, six.binary_type)):
_process_input = process_input
else:
_process_input = process_input.encode('utf-8')
obj, cmd = create_process(cmd, addl_env=addl_env)
_stdout, _stderr = obj.communicate(process_input)
_stdout, _stderr = obj.communicate(_process_input)
obj.stdin.close()
if six.PY3:
if isinstance(_stdout, bytes):
try:
_stdout = _stdout.decode(encoding='utf-8')
except UnicodeError:
pass
if isinstance(_stderr, bytes):
try:
_stderr = _stderr.decode(encoding='utf-8')
except UnicodeError:
pass
m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdin: %(stdin)s\n"
"Stdout: %(stdout)s\nStderr: %(stderr)s") % \
{'cmd': cmd,

View File

@ -273,7 +273,17 @@ class SortingEmulatedHelper(SortingHelper):
def sort(self, items):
def cmp_func(obj1, obj2):
for key, direction in self.sort_dict:
ret = (obj1[key] > obj2[key]) - (obj1[key] < obj2[key])
o1 = obj1[key]
o2 = obj2[key]
if o1 is None and o2 is None:
ret = 0
elif o1 is None and o2 is not None:
ret = -1
elif o1 is not None and o2 is None:
ret = 1
else:
ret = (o1 > o2) - (o1 < o2)
if ret:
return ret * (1 if direction else -1)
return 0
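The explicit None branches matter on Python 3, where None < int raises TypeError; the comparator sorts None below every other value. A standalone sketch of the same ordering:

import functools

def none_low_cmp(o1, o2):
    # None compares lower than any value, mirroring the branches above.
    if o1 is None and o2 is None:
        return 0
    if o1 is None:
        return -1
    if o2 is None:
        return 1
    return (o1 > o2) - (o1 < o2)

assert sorted([3, None, 1],
              key=functools.cmp_to_key(none_low_cmp)) == [None, 1, 3]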

View File

@ -17,7 +17,6 @@
import abc
import collections
import imp
import itertools
import os
from oslo_config import cfg
@ -560,10 +559,7 @@ class PluginAwareExtensionManager(ExtensionManager):
def _plugins_support(self, extension):
alias = extension.get_alias()
supports_extension = any((hasattr(plugin,
"supported_extension_aliases") and
alias in plugin.supported_extension_aliases)
for plugin in self.plugins.values())
supports_extension = alias in self.get_supported_extension_aliases()
if not supports_extension:
LOG.warn(_LW("Extension %s not supported by any of loaded "
"plugins"),
@ -588,11 +584,25 @@ class PluginAwareExtensionManager(ExtensionManager):
manager.NeutronManager.get_service_plugins())
return cls._instance
def get_supported_extension_aliases(self):
"""Gets extension aliases supported by all plugins."""
aliases = set()
for plugin in self.plugins.values():
# we also check all classes that the plugins inherit to see if they
# directly provide support for an extension
for item in [plugin] + plugin.__class__.mro():
try:
aliases |= set(
getattr(item, "supported_extension_aliases", []))
except TypeError:
# we land here if a class declares supported_extension_aliases
# as an @property; properties only evaluate on instances, not on
# the class itself
pass
return aliases
def check_if_plugin_extensions_loaded(self):
"""Check if an extension supported by a plugin has been loaded."""
plugin_extensions = set(itertools.chain.from_iterable([
getattr(plugin, "supported_extension_aliases", [])
for plugin in self.plugins.values()]))
plugin_extensions = self.get_supported_extension_aliases()
missing_aliases = plugin_extensions - set(self.extensions)
if missing_aliases:
raise exceptions.ExtensionsNotFound(
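
A toy illustration (hypothetical plugin classes) of why the MRO walk and the TypeError guard above are needed: getattr on a class that declares supported_extension_aliases as an @property returns the property object itself, which is not iterable.

class Base(object):
    supported_extension_aliases = ['quotas']

class Plugin(Base):
    @property
    def supported_extension_aliases(self):
        return ['qos']

plugin = Plugin()
aliases = set()
for item in [plugin] + plugin.__class__.mro():
    try:
        aliases |= set(getattr(item, 'supported_extension_aliases', []))
    except TypeError:
        # a property object on the class itself is not iterable
        pass
print(aliases)  # contains both 'qos' and 'quotas'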

View File

@ -100,7 +100,7 @@ class L3AgentNotifyAPI(object):
cctxt.cast(context, method, payload=dvr_arptable)
def _notification(self, context, method, router_ids, operation,
shuffle_agents):
shuffle_agents, schedule_routers=True):
"""Notify all the agents that are hosting the routers."""
plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
@ -112,7 +112,8 @@ class L3AgentNotifyAPI(object):
plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
adminContext = (context.is_admin and
context or context.elevated())
plugin.schedule_routers(adminContext, router_ids)
if schedule_routers:
plugin.schedule_routers(adminContext, router_ids)
self._agent_notification(
context, method, router_ids, operation, shuffle_agents)
else:
@ -138,10 +139,10 @@ class L3AgentNotifyAPI(object):
self._notification_fanout(context, 'router_deleted', router_id)
def routers_updated(self, context, router_ids, operation=None, data=None,
shuffle_agents=False):
shuffle_agents=False, schedule_routers=True):
if router_ids:
self._notification(context, 'routers_updated', router_ids,
operation, shuffle_agents)
operation, shuffle_agents, schedule_routers)
def add_arp_entry(self, context, router_id, arp_table, operation=None):
self._agent_notification_arp(context, 'add_arp_entry', router_id,

View File

@ -0,0 +1,44 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from neutron.api.rpc.callbacks import resource_manager
LOG = logging.getLogger(__name__)
# TODO(ajo): consider adding locking to _get_manager; it's
# safe for eventlet, but not for normal threading.
def _get_manager():
return resource_manager.ConsumerResourceCallbacksManager()
def subscribe(callback, resource_type):
_get_manager().register(callback, resource_type)
def unsubscribe(callback, resource_type):
_get_manager().unregister(callback, resource_type)
def push(resource_type, resource, event_type):
"""Push resource events into all registered callbacks for the type."""
callbacks = _get_manager().get_callbacks(resource_type)
for callback in callbacks:
callback(resource, event_type)
def clear():
_get_manager().clear()
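
A hedged usage sketch of the consumer registry above; the handler name is illustrative, and resources.QOS_POLICY is the only valid type at this point:

from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import resources

def handle_qos_policy(resource, event_type):
    # react to the pushed versioned object here
    pass

registry.subscribe(handle_qos_policy, resources.QOS_POLICY)
# the agent-side RPC endpoint later fans events out to all subscribers:
# registry.push(resources.QOS_POLICY, policy_obj, 'updated')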

View File

@ -1,6 +1,3 @@
# Copyright 2013 OpenStack Foundation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -13,12 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
ML2 Mechanism Driver for Cisco Nexus platforms.
"""
CREATED = 'created'
UPDATED = 'updated'
DELETED = 'deleted'
from networking_cisco.plugins.ml2.drivers.cisco.nexus import mech_cisco_nexus
class CiscoNexusMechanismDriver(mech_cisco_nexus.CiscoNexusMechanismDriver):
pass
VALID = (
CREATED,
UPDATED,
DELETED
)

View File

@ -1,7 +1,3 @@
# Copyright 2014 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -17,10 +13,13 @@
from neutron.common import exceptions
class SdnveException(exceptions.NeutronException):
message = _("An unexpected error occurred in the SDN-VE Plugin. "
"Here is the error message: %(msg)s")
class CallbackWrongResourceType(exceptions.NeutronException):
message = _('Callback for %(resource_type)s returned wrong resource type')
class BadInputException(exceptions.BadRequest):
message = _("The input does not contain nececessary info: %(msg)s")
class CallbackNotFound(exceptions.NeutronException):
message = _('Callback for %(resource_type)s not found')
class CallbacksMaxLimitReached(exceptions.NeutronException):
message = _("Cannot add multiple callbacks for %(resource_type)s")

View File

@ -0,0 +1,62 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from neutron.api.rpc.callbacks import exceptions
from neutron.api.rpc.callbacks import resource_manager
from neutron.objects import base
LOG = logging.getLogger(__name__)
# TODO(ajo): consider adding locking: it's safe for eventlet but not
# for other types of threading.
def _get_manager():
return resource_manager.ProducerResourceCallbacksManager()
def provide(callback, resource_type):
"""Register a callback as a producer for the resource type.
This callback will be used to produce resources of the corresponding
type for interested parties.
"""
_get_manager().register(callback, resource_type)
def unprovide(callback, resource_type):
"""Unregister a callback for corresponding resource type."""
_get_manager().unregister(callback, resource_type)
def clear():
"""Clear all callbacks."""
_get_manager().clear()
def pull(resource_type, resource_id, **kwargs):
"""Get resource object that corresponds to resource id.
The function will return an object that is provided by resource producer.
:returns: NeutronObject
"""
callback = _get_manager().get_callback(resource_type)
obj = callback(resource_type, resource_id, **kwargs)
if obj:
if (not isinstance(obj, base.NeutronObject) or
resource_type != obj.obj_name()):
raise exceptions.CallbackWrongResourceType(
resource_type=resource_type)
return obj
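
And a matching hedged sketch for the producer side; the callback name and the lookup it performs are illustrative:

from neutron.api.rpc.callbacks.producer import registry
from neutron.api.rpc.callbacks import resources

def get_qos_policy(resource_type, policy_id, **kwargs):
    # look the QosPolicy up (e.g. in the db) and return it,
    # or None when it does not exist (details elided)
    return None

registry.provide(get_qos_policy, resources.QOS_POLICY)
# an RPC callback can now serve agent pulls:
policy = registry.pull(resources.QOS_POLICY, 'some-policy-id')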

View File

@ -0,0 +1,139 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
from oslo_log import log as logging
import six
from neutron.api.rpc.callbacks import exceptions as rpc_exc
from neutron.api.rpc.callbacks import resources
from neutron.callbacks import exceptions
LOG = logging.getLogger(__name__)
# TODO(QoS): split the registry/resources_rpc modules into two separate things:
# one for pull and one for push APIs
def _validate_resource_type(resource_type):
if not resources.is_valid_resource_type(resource_type):
raise exceptions.Invalid(element='resource', value=resource_type)
@six.add_metaclass(abc.ABCMeta)
class ResourceCallbacksManager(object):
"""A callback system that allows information providers in a loose manner.
"""
# This hook is to allow tests to get new objects for the class
_singleton = True
def __new__(cls, *args, **kwargs):
if not cls._singleton:
return super(ResourceCallbacksManager, cls).__new__(cls)
if not hasattr(cls, '_instance'):
cls._instance = super(ResourceCallbacksManager, cls).__new__(cls)
return cls._instance
@abc.abstractmethod
def _add_callback(self, callback, resource_type):
pass
@abc.abstractmethod
def _delete_callback(self, callback, resource_type):
pass
def register(self, callback, resource_type):
"""Register a callback for a resource type.
:param callback: the callback; it must return a NeutronObject or raise.
:param resource_type: must be a valid resource type.
"""
LOG.debug("Registering callback for %s", resource_type)
_validate_resource_type(resource_type)
self._add_callback(callback, resource_type)
def unregister(self, callback, resource_type):
"""Unregister callback from the registry.
:param callback: the callback.
:param resource_type: must be a valid resource type.
"""
LOG.debug("Unregistering callback for %s", resource_type)
_validate_resource_type(resource_type)
self._delete_callback(callback, resource_type)
@abc.abstractmethod
def clear(self):
"""Brings the manager to a clean state."""
def get_subscribed_types(self):
return list(self._callbacks.keys())
class ProducerResourceCallbacksManager(ResourceCallbacksManager):
_callbacks = dict()
def _add_callback(self, callback, resource_type):
if resource_type in self._callbacks:
raise rpc_exc.CallbacksMaxLimitReached(resource_type=resource_type)
self._callbacks[resource_type] = callback
def _delete_callback(self, callback, resource_type):
try:
del self._callbacks[resource_type]
except KeyError:
raise rpc_exc.CallbackNotFound(resource_type=resource_type)
def clear(self):
self._callbacks = dict()
def get_callback(self, resource_type):
_validate_resource_type(resource_type)
try:
return self._callbacks[resource_type]
except KeyError:
raise rpc_exc.CallbackNotFound(resource_type=resource_type)
class ConsumerResourceCallbacksManager(ResourceCallbacksManager):
_callbacks = collections.defaultdict(set)
def _add_callback(self, callback, resource_type):
self._callbacks[resource_type].add(callback)
def _delete_callback(self, callback, resource_type):
try:
self._callbacks[resource_type].remove(callback)
if not self._callbacks[resource_type]:
del self._callbacks[resource_type]
except KeyError:
raise rpc_exc.CallbackNotFound(resource_type=resource_type)
def clear(self):
self._callbacks = collections.defaultdict(set)
def get_callbacks(self, resource_type):
"""Return the callback if found, None otherwise.
:param resource_type: must be a valid resource type.
"""
_validate_resource_type(resource_type)
callbacks = self._callbacks[resource_type]
if not callbacks:
raise rpc_exc.CallbackNotFound(resource_type=resource_type)
return callbacks
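
The two managers differ in cardinality: a producer type accepts exactly one callback, while a consumer type collects a set of them. A small illustration (lambdas stand in for real callbacks):

from neutron.api.rpc.callbacks import resource_manager
from neutron.api.rpc.callbacks import resources

prod = resource_manager.ProducerResourceCallbacksManager()
prod.register(lambda rt, rid, **kw: None, resources.QOS_POLICY)
# a second register() for the same type raises CallbacksMaxLimitReached

cons = resource_manager.ConsumerResourceCallbacksManager()
cons.register(lambda resource, event_type: None, resources.QOS_POLICY)
cons.register(lambda resource, event_type: None, resources.QOS_POLICY)
print(len(cons.get_callbacks(resources.QOS_POLICY)))  # 2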

View File

@ -0,0 +1,49 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.objects.qos import policy
_QOS_POLICY_CLS = policy.QosPolicy
_VALID_CLS = (
_QOS_POLICY_CLS,
)
_VALID_TYPES = [cls.obj_name() for cls in _VALID_CLS]
# Supported types
QOS_POLICY = _QOS_POLICY_CLS.obj_name()
_TYPE_TO_CLS_MAP = {
QOS_POLICY: _QOS_POLICY_CLS,
}
def get_resource_type(resource_cls):
if not resource_cls:
return None
if not hasattr(resource_cls, 'obj_name'):
return None
return resource_cls.obj_name()
def is_valid_resource_type(resource_type):
return resource_type in _VALID_TYPES
def get_resource_cls(resource_type):
return _TYPE_TO_CLS_MAP.get(resource_type)
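
A short usage sketch of the type registry above (assuming QosPolicy.obj_name() yields 'QosPolicy'):

from neutron.api.rpc.callbacks import resources

print(resources.is_valid_resource_type(resources.QOS_POLICY))  # True
cls = resources.get_resource_cls(resources.QOS_POLICY)
print(resources.get_resource_type(cls) == resources.QOS_POLICY)  # True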

View File

@ -30,8 +30,10 @@ from neutron.db import api as db_api
from neutron.extensions import portbindings
from neutron.i18n import _LW
from neutron import manager
from neutron.plugins.common import utils as p_utils
from neutron.quota import resource_registry
LOG = logging.getLogger(__name__)
@ -77,7 +79,7 @@ class DhcpRpcCallback(object):
"""Perform port operations taking care of concurrency issues."""
try:
if action == 'create_port':
return plugin.create_port(context, port)
return p_utils.create_port(plugin, context, port)
elif action == 'update_port':
return plugin.update_port(context, port['id'], port)
else:

View File

@ -32,6 +32,9 @@ class DVRServerRpcApi(object):
can be found below: DVRServerRpcCallback. For more information on changing
rpc interfaces, see doc/source/devref/rpc_api.rst.
"""
# 1.0 Initial Version
# 1.1 Support for passing 'fixed_ips' in get_subnet_for_dvr function.
# Passing 'subnet" will be deprecated in the next release.
def __init__(self, topic):
target = oslo_messaging.Target(topic=topic, version='1.0',
@ -55,9 +58,10 @@ class DVRServerRpcApi(object):
host=host, subnet=subnet)
@log_helpers.log_method_call
def get_subnet_for_dvr(self, context, subnet):
def get_subnet_for_dvr(self, context, subnet, fixed_ips):
cctxt = self.client.prepare()
return cctxt.call(context, 'get_subnet_for_dvr', subnet=subnet)
return cctxt.call(
context, 'get_subnet_for_dvr', subnet=subnet, fixed_ips=fixed_ips)
class DVRServerRpcCallback(object):
@ -70,8 +74,10 @@ class DVRServerRpcCallback(object):
# History
# 1.0 Initial version
# 1.1 Support for passing the 'fixed_ips" in get_subnet_for_dvr.
# Passing subnet will be deprecated in the next release.
target = oslo_messaging.Target(version='1.0',
target = oslo_messaging.Target(version='1.1',
namespace=constants.RPC_NAMESPACE_DVR)
@property
@ -96,8 +102,10 @@ class DVRServerRpcCallback(object):
host, subnet)
def get_subnet_for_dvr(self, context, **kwargs):
fixed_ips = kwargs.get('fixed_ips')
subnet = kwargs.get('subnet')
return self.plugin.get_subnet_for_dvr(context, subnet)
return self.plugin.get_subnet_for_dvr(
context, subnet, fixed_ips=fixed_ips)
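
The server side stays compatible with 1.0 callers because new arguments are read with kwargs.get(). A hedged standalone illustration (the return-value logic here is invented for the example):

def get_subnet_for_dvr(context, **kwargs):
    fixed_ips = kwargs.get('fixed_ips')  # None when a 1.0 client calls
    subnet = kwargs.get('subnet')        # legacy argument
    if fixed_ips:
        return fixed_ips[0]['subnet_id']
    return subnet

print(get_subnet_for_dvr(None, subnet='subnet-1'))                 # 1.0 call
print(get_subnet_for_dvr(None, fixed_ips=[{'subnet_id': 's-2'}]))  # 1.1 call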
class DVRAgentRpcApiMixin(object):

View File

@ -98,13 +98,16 @@ class L3RpcCallback(object):
LOG.debug("Checking router: %(id)s for host: %(host)s",
{'id': router['id'], 'host': host})
if router.get('gw_port') and router.get('distributed'):
# '' is used to effectively clear the binding of a gw port when it
# is not bound (snat is not hosted on any l3 agent)
gw_port_host = router.get('gw_port_host') or ''
self._ensure_host_set_on_port(context,
router.get('gw_port_host'),
gw_port_host,
router.get('gw_port'),
router['id'])
for p in router.get(constants.SNAT_ROUTER_INTF_KEY, []):
self._ensure_host_set_on_port(context,
router.get('gw_port_host'),
gw_port_host,
p, router['id'])
else:
self._ensure_host_set_on_port(
@ -143,6 +146,8 @@ class L3RpcCallback(object):
context,
port['id'],
{'port': {portbindings.HOST_ID: host}})
# update the port's host so the actual binding info reaches the l3 agent
port[portbindings.HOST_ID] = host
except exceptions.PortNotFound:
LOG.debug("Port %(port)s not found while updating "
"agent binding for router %(router)s.",
@ -269,6 +274,10 @@ class L3RpcCallback(object):
def process_prefix_update(self, context, **kwargs):
subnets = kwargs.get('subnets')
updated_subnets = []
for subnet_id, prefix in subnets.items():
self.plugin.update_subnet(context, subnet_id,
{'subnet': {'cidr': prefix}})
updated_subnets.append(self.plugin.update_subnet(
context,
subnet_id,
{'subnet': {'cidr': prefix}}))
return updated_subnets

View File

@ -0,0 +1,174 @@
# Copyright (c) 2015 Mellanox Technologies, Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
import oslo_messaging
from neutron.api.rpc.callbacks.consumer import registry as cons_registry
from neutron.api.rpc.callbacks.producer import registry as prod_registry
from neutron.api.rpc.callbacks import resources
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.objects import base as obj_base
LOG = logging.getLogger(__name__)
class ResourcesRpcError(exceptions.NeutronException):
pass
class InvalidResourceTypeClass(ResourcesRpcError):
message = _("Invalid resource type %(resource_type)s")
class ResourceNotFound(ResourcesRpcError):
message = _("Resource %(resource_id)s of type %(resource_type)s "
"not found")
def _validate_resource_type(resource_type):
if not resources.is_valid_resource_type(resource_type):
raise InvalidResourceTypeClass(resource_type=resource_type)
def resource_type_versioned_topic(resource_type):
_validate_resource_type(resource_type)
cls = resources.get_resource_cls(resource_type)
return topics.RESOURCE_TOPIC_PATTERN % {'resource_type': resource_type,
'version': cls.VERSION}
class ResourcesPullRpcApi(object):
"""Agent-side RPC (stub) for agent-to-plugin interaction.
This class implements the client side of an rpc interface. The server side
can be found below: ResourcesPullRpcCallback. For more information on
this RPC interface, see doc/source/devref/rpc_callbacks.rst.
"""
def __new__(cls):
# make it a singleton
if not hasattr(cls, '_instance'):
cls._instance = super(ResourcesPullRpcApi, cls).__new__(cls)
target = oslo_messaging.Target(
topic=topics.PLUGIN, version='1.0',
namespace=constants.RPC_NAMESPACE_RESOURCES)
cls._instance.client = n_rpc.get_client(target)
return cls._instance
@log_helpers.log_method_call
def pull(self, context, resource_type, resource_id):
_validate_resource_type(resource_type)
# we've already validated the resource type, so we are pretty sure the
# class is there => no need to validate it specifically
resource_type_cls = resources.get_resource_cls(resource_type)
cctxt = self.client.prepare()
primitive = cctxt.call(context, 'pull',
resource_type=resource_type,
version=resource_type_cls.VERSION, resource_id=resource_id)
if primitive is None:
raise ResourceNotFound(resource_type=resource_type,
resource_id=resource_id)
return resource_type_cls.clean_obj_from_primitive(primitive)
class ResourcesPullRpcCallback(object):
"""Plugin-side RPC (implementation) for agent-to-plugin interaction.
This class implements the server side of an rpc interface. The client side
can be found above: ResourcesPullRpcApi. For more information on
this RPC interface, see doc/source/devref/rpc_callbacks.rst.
"""
# History
# 1.0 Initial version
target = oslo_messaging.Target(
version='1.0', namespace=constants.RPC_NAMESPACE_RESOURCES)
def pull(self, context, resource_type, version, resource_id):
obj = prod_registry.pull(resource_type, resource_id, context=context)
if obj:
# TODO(QoS): Remove in the future with a new version of
# versionedobjects containing
# https://review.openstack.org/#/c/207998/
if version == obj.VERSION:
version = None
return obj.obj_to_primitive(target_version=version)
class ResourcesPushRpcApi(object):
"""Plugin-side RPC for plugin-to-agents interaction.
This interface is designed to push versioned object updates to interested
agents using fanout topics.
This class implements the caller side of an rpc interface. The receiver
side can be found below: ResourcesPushRpcCallback.
"""
def __init__(self):
target = oslo_messaging.Target(
version='1.0',
namespace=constants.RPC_NAMESPACE_RESOURCES)
self.client = n_rpc.get_client(target)
def _prepare_object_fanout_context(self, obj):
"""Prepare fanout context, one topic per object type."""
obj_topic = resource_type_versioned_topic(obj.obj_name())
return self.client.prepare(fanout=True, topic=obj_topic)
@log_helpers.log_method_call
def push(self, context, resource, event_type):
resource_type = resources.get_resource_type(resource)
_validate_resource_type(resource_type)
cctxt = self._prepare_object_fanout_context(resource)
# TODO(QoS): Push notifications for every known version once we have
# multiple of those
dehydrated_resource = resource.obj_to_primitive()
cctxt.cast(context, 'push',
resource=dehydrated_resource,
event_type=event_type)
class ResourcesPushRpcCallback(object):
"""Agent-side RPC for plugin-to-agents interaction.
This class implements the receiver for notifications about versioned
object resource updates used by neutron.api.rpc.callbacks. You can find
the caller side in ResourcesPushRpcApi.
"""
# History
# 1.0 Initial version
target = oslo_messaging.Target(version='1.0',
namespace=constants.RPC_NAMESPACE_RESOURCES)
def push(self, context, resource, event_type):
resource_obj = obj_base.NeutronObject.clean_obj_from_primitive(
resource)
LOG.debug("Resources notification (%(event_type)s): %(resource)s",
{'event_type': event_type, 'resource': repr(resource_obj)})
resource_type = resources.get_resource_type(resource_obj)
cons_registry.push(resource_type, resource_obj, event_type)
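
Each object type gets its own versioned fanout topic. Assuming RESOURCE_TOPIC_PATTERN in neutron.common.topics is of the form shown below, the QoS policy topic would look like this:

RESOURCE_TOPIC_PATTERN = 'neutron-vo-%(resource_type)s-%(version)s'  # assumed
topic = RESOURCE_TOPIC_PATTERN % {'resource_type': 'QosPolicy',
                                  'version': '1.0'}
print(topic)  # neutron-vo-QosPolicy-1.0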

View File

@ -19,6 +19,7 @@ import netaddr
from oslo_log import log as logging
from oslo_utils import uuidutils
import six
import webob.exc
from neutron.common import constants
from neutron.common import exceptions as n_exc
@ -170,6 +171,10 @@ def _validate_mac_address(data, valid_values=None):
valid_mac = netaddr.valid_mac(_validate_no_whitespace(data))
except Exception:
valid_mac = False
if valid_mac:
valid_mac = netaddr.EUI(data) not in map(
    netaddr.EUI, constants.INVALID_MAC_ADDRESSES)
# TODO(arosen): The code in this file should be refactored
# so it catches the correct exceptions. _validate_no_whitespace
# raises AttributeError if data is None.
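
A standalone sketch of the added check; the contents of constants.INVALID_MAC_ADDRESSES are assumed here (typically the all-zero MAC):

import netaddr

INVALID_MAC_ADDRESSES = ['00:00:00:00:00:00']  # assumed for the example
data = '00:00:00:00:00:00'
valid_mac = netaddr.valid_mac(data)
if valid_mac:
    valid_mac = netaddr.EUI(data) not in map(netaddr.EUI,
                                             INVALID_MAC_ADDRESSES)
print(valid_mac)  # False: syntactically valid but blacklisted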
@ -825,7 +830,7 @@ RESOURCE_ATTRIBUTE_MAP = {
'is_visible': True},
'tenant_id': {'allow_post': True,
'allow_put': False,
'validate': {'type:string': None},
'validate': {'type:string': TENANT_ID_MAX_LEN},
'required_by_policy': True,
'is_visible': True},
'prefixes': {'allow_post': True,
@ -884,3 +889,65 @@ PLURALS = {NETWORKS: NETWORK,
'allocation_pools': 'allocation_pool',
'fixed_ips': 'fixed_ip',
'extensions': 'extension'}
def fill_default_value(attr_info, res_dict,
exc_cls=ValueError,
check_allow_post=True):
for attr, attr_vals in six.iteritems(attr_info):
if attr_vals['allow_post']:
if ('default' not in attr_vals and
attr not in res_dict):
msg = _("Failed to parse request. Required "
"attribute '%s' not specified") % attr
raise exc_cls(msg)
res_dict[attr] = res_dict.get(attr,
attr_vals.get('default'))
elif check_allow_post:
if attr in res_dict:
msg = _("Attribute '%s' not allowed in POST") % attr
raise exc_cls(msg)
def convert_value(attr_info, res_dict, exc_cls=ValueError):
for attr, attr_vals in six.iteritems(attr_info):
if (attr not in res_dict or
res_dict[attr] is ATTR_NOT_SPECIFIED):
continue
# Convert values if necessary
if 'convert_to' in attr_vals:
res_dict[attr] = attr_vals['convert_to'](res_dict[attr])
# Check that configured values are correct
if 'validate' not in attr_vals:
continue
for rule in attr_vals['validate']:
res = validators[rule](res_dict[attr], attr_vals['validate'][rule])
if res:
msg_dict = dict(attr=attr, reason=res)
msg = _("Invalid input for %(attr)s. "
"Reason: %(reason)s.") % msg_dict
raise exc_cls(msg)
def populate_tenant_id(context, res_dict, attr_info, is_create):
if (('tenant_id' in res_dict and
res_dict['tenant_id'] != context.tenant_id and
not context.is_admin)):
msg = _("Specifying 'tenant_id' other than authenticated "
"tenant in request requires admin privileges")
raise webob.exc.HTTPBadRequest(msg)
if is_create and 'tenant_id' not in res_dict:
if context.tenant_id:
res_dict['tenant_id'] = context.tenant_id
elif 'tenant_id' in attr_info:
msg = _("Running without keystone AuthN requires "
"that tenant_id is specified")
raise webob.exc.HTTPBadRequest(msg)
def verify_attributes(res_dict, attr_info):
extra_keys = set(res_dict.keys()) - set(attr_info.keys())
if extra_keys:
msg = _("Unrecognized attribute(s) '%s'") % ', '.join(extra_keys)
raise webob.exc.HTTPBadRequest(msg)
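
A hedged usage sketch of the helpers extracted above, with a toy attribute map (a real RESOURCE_ATTRIBUTE_MAP entry is much richer; the module path is assumed):

from neutron.api.v2 import attributes

attr_info = {
    'name': {'allow_post': True, 'allow_put': True,
             'validate': {'type:string': 255}, 'default': ''},
    'id': {'allow_post': False, 'allow_put': False},
}
res_dict = {'name': 'net1'}
attributes.fill_default_value(attr_info, res_dict)  # fills defaults, rejects 'id'
attributes.convert_value(attr_info, res_dict)       # converters and validators
attributes.verify_attributes(res_dict, attr_info)   # rejects unknown attributes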

View File

@ -194,7 +194,12 @@ class Controller(object):
policy.init()
# Fetch the resource and verify if the user can access it
try:
resource = self._item(request, id, True)
parent_id = kwargs.get(self._parent_id_name)
resource = self._item(request,
id,
do_authz=True,
field_list=None,
parent_id=parent_id)
except oslo_policy.PolicyNotAuthorized:
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
@ -596,23 +601,6 @@ class Controller(object):
self._send_nova_notification(action, orig_object_copy, result)
return result
@staticmethod
def _populate_tenant_id(context, res_dict, attr_info, is_create):
if (('tenant_id' in res_dict and
res_dict['tenant_id'] != context.tenant_id and
not context.is_admin)):
msg = _("Specifying 'tenant_id' other than authenticated "
"tenant in request requires admin privileges")
raise webob.exc.HTTPBadRequest(msg)
if is_create and 'tenant_id' not in res_dict:
if context.tenant_id:
res_dict['tenant_id'] = context.tenant_id
elif 'tenant_id' in attr_info:
msg = _("Running without keystone AuthN requires "
"that tenant_id is specified")
raise webob.exc.HTTPBadRequest(msg)
@staticmethod
def prepare_request_body(context, body, is_create, resource, attr_info,
allow_bulk=False):
@ -652,56 +640,21 @@ class Controller(object):
msg = _("Unable to find '%s' in request body") % resource
raise webob.exc.HTTPBadRequest(msg)
Controller._populate_tenant_id(context, res_dict, attr_info, is_create)
Controller._verify_attributes(res_dict, attr_info)
attributes.populate_tenant_id(context, res_dict, attr_info, is_create)
attributes.verify_attributes(res_dict, attr_info)
if is_create: # POST
for attr, attr_vals in six.iteritems(attr_info):
if attr_vals['allow_post']:
if ('default' not in attr_vals and
attr not in res_dict):
msg = _("Failed to parse request. Required "
"attribute '%s' not specified") % attr
raise webob.exc.HTTPBadRequest(msg)
res_dict[attr] = res_dict.get(attr,
attr_vals.get('default'))
else:
if attr in res_dict:
msg = _("Attribute '%s' not allowed in POST") % attr
raise webob.exc.HTTPBadRequest(msg)
attributes.fill_default_value(attr_info, res_dict,
webob.exc.HTTPBadRequest)
else: # PUT
for attr, attr_vals in six.iteritems(attr_info):
if attr in res_dict and not attr_vals['allow_put']:
msg = _("Cannot update read-only attribute %s") % attr
raise webob.exc.HTTPBadRequest(msg)
for attr, attr_vals in six.iteritems(attr_info):
if (attr not in res_dict or
res_dict[attr] is attributes.ATTR_NOT_SPECIFIED):
continue
# Convert values if necessary
if 'convert_to' in attr_vals:
res_dict[attr] = attr_vals['convert_to'](res_dict[attr])
# Check that configured values are correct
if 'validate' not in attr_vals:
continue
for rule in attr_vals['validate']:
res = attributes.validators[rule](res_dict[attr],
attr_vals['validate'][rule])
if res:
msg_dict = dict(attr=attr, reason=res)
msg = _("Invalid input for %(attr)s. "
"Reason: %(reason)s.") % msg_dict
raise webob.exc.HTTPBadRequest(msg)
attributes.convert_value(attr_info, res_dict, webob.exc.HTTPBadRequest)
return body
@staticmethod
def _verify_attributes(res_dict, attr_info):
extra_keys = set(res_dict.keys()) - set(attr_info.keys())
if extra_keys:
msg = _("Unrecognized attribute(s) '%s'") % ', '.join(extra_keys)
raise webob.exc.HTTPBadRequest(msg)
def _validate_network_tenant_ownership(self, request, resource_item):
# TODO(salvatore-orlando): consider whether this check can be folded
# in the policy engine

Some files were not shown because too many files have changed in this diff.